Diffstat (limited to 'fs/nfs/nfs4proc.c')
-rw-r--r-- | fs/nfs/nfs4proc.c | 1348
1 file changed, 1289 insertions, 59 deletions
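The bulk of the patch below converts direct rpc_call_sync() call sites to an nfs4_call_sync() macro that indirects through the nfs_client's cl_call_sync pointer, so NFSv4.0 and NFSv4.1 mounts share the same call sites while only the v4.1 path does SEQUENCE slot bookkeeping. A minimal sketch of that dispatch pattern, with simplified stand-in names and types rather than the kernel's real structures or SUNRPC API:

/*
 * Sketch of per-minorversion synchronous call dispatch.  The struct
 * names, fields and printf placeholders are illustrative only.
 */
#include <stdio.h>

struct server;
typedef int (*call_sync_fn)(struct server *srv, const char *proc, int cache_reply);

struct client {
	int minorversion;
	call_sync_fn call_sync;   /* plays the role of cl_call_sync */
};

struct server {
	struct client *clp;
};

/* v4.0 path: just issue the RPC, no session bookkeeping. */
static int call_sync_v40(struct server *srv, const char *proc, int cache_reply)
{
	(void)srv; (void)cache_reply;
	printf("v4.0: rpc_call_sync(%s)\n", proc);
	return 0;
}

/* v4.1 path: allocate a session slot, run the RPC, then free the slot. */
static int call_sync_v41(struct server *srv, const char *proc, int cache_reply)
{
	(void)srv;
	printf("v4.1: setup SEQUENCE (cachethis=%d), rpc(%s), free slot\n",
	       cache_reply, proc);
	return 0;
}

/* Call sites stay minorversion-agnostic, like the nfs4_call_sync() macro. */
static int nfs4_call_sync(struct server *srv, const char *proc, int cache_reply)
{
	return srv->clp->call_sync(srv, proc, cache_reply);
}

int main(void)
{
	struct client c40 = { .minorversion = 0, .call_sync = call_sync_v40 };
	struct client c41 = { .minorversion = 1, .call_sync = call_sync_v41 };
	struct server s40 = { .clp = &c40 }, s41 = { .clp = &c41 };

	nfs4_call_sync(&s40, "GETATTR", 0);
	nfs4_call_sync(&s41, "GETATTR", 0);
	return 0;
}

In the patch itself the two implementations are _nfs4_call_sync() and _nfs4_call_sync_session(), and the macro expands to (server)->nfs_client->cl_call_sync(...).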
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 4674f8092da..92ce4351781 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -48,11 +48,14 @@ #include <linux/smp_lock.h> #include <linux/namei.h> #include <linux/mount.h> +#include <linux/module.h> +#include <linux/sunrpc/bc_xprt.h> #include "nfs4_fs.h" #include "delegation.h" #include "internal.h" #include "iostat.h" +#include "callback.h" #define NFSDBG_FACILITY NFSDBG_PROC @@ -247,7 +250,25 @@ static int nfs4_handle_exception(const struct nfs_server *server, int errorcode, ret = nfs4_wait_clnt_recover(clp); if (ret == 0) exception->retry = 1; +#if !defined(CONFIG_NFS_V4_1) break; +#else /* !defined(CONFIG_NFS_V4_1) */ + if (!nfs4_has_session(server->nfs_client)) + break; + /* FALLTHROUGH */ + case -NFS4ERR_BADSESSION: + case -NFS4ERR_BADSLOT: + case -NFS4ERR_BAD_HIGH_SLOT: + case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: + case -NFS4ERR_DEADSESSION: + case -NFS4ERR_SEQ_FALSE_RETRY: + case -NFS4ERR_SEQ_MISORDERED: + dprintk("%s ERROR: %d Reset session\n", __func__, + errorcode); + set_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state); + exception->retry = 1; + /* FALLTHROUGH */ +#endif /* !defined(CONFIG_NFS_V4_1) */ case -NFS4ERR_FILE_OPEN: case -NFS4ERR_GRACE: case -NFS4ERR_DELAY: @@ -271,6 +292,353 @@ static void renew_lease(const struct nfs_server *server, unsigned long timestamp spin_unlock(&clp->cl_lock); } +#if defined(CONFIG_NFS_V4_1) + +/* + * nfs4_free_slot - free a slot and efficiently update slot table. + * + * freeing a slot is trivially done by clearing its respective bit + * in the bitmap. + * If the freed slotid equals highest_used_slotid we want to update it + * so that the server would be able to size down the slot table if needed, + * otherwise we know that the highest_used_slotid is still in use. + * When updating highest_used_slotid there may be "holes" in the bitmap + * so we need to scan down from highest_used_slotid to 0 looking for the now + * highest slotid in use. + * If none found, highest_used_slotid is set to -1. 
+ */ +static void +nfs4_free_slot(struct nfs4_slot_table *tbl, u8 free_slotid) +{ + int slotid = free_slotid; + + spin_lock(&tbl->slot_tbl_lock); + /* clear used bit in bitmap */ + __clear_bit(slotid, tbl->used_slots); + + /* update highest_used_slotid when it is freed */ + if (slotid == tbl->highest_used_slotid) { + slotid = find_last_bit(tbl->used_slots, tbl->max_slots); + if (slotid >= 0 && slotid < tbl->max_slots) + tbl->highest_used_slotid = slotid; + else + tbl->highest_used_slotid = -1; + } + rpc_wake_up_next(&tbl->slot_tbl_waitq); + spin_unlock(&tbl->slot_tbl_lock); + dprintk("%s: free_slotid %u highest_used_slotid %d\n", __func__, + free_slotid, tbl->highest_used_slotid); +} + +void nfs41_sequence_free_slot(const struct nfs_client *clp, + struct nfs4_sequence_res *res) +{ + struct nfs4_slot_table *tbl; + + if (!nfs4_has_session(clp)) { + dprintk("%s: No session\n", __func__); + return; + } + tbl = &clp->cl_session->fc_slot_table; + if (res->sr_slotid == NFS4_MAX_SLOT_TABLE) { + dprintk("%s: No slot\n", __func__); + /* just wake up the next guy waiting since + * we may have not consumed a slot after all */ + rpc_wake_up_next(&tbl->slot_tbl_waitq); + return; + } + nfs4_free_slot(tbl, res->sr_slotid); + res->sr_slotid = NFS4_MAX_SLOT_TABLE; +} + +static void nfs41_sequence_done(struct nfs_client *clp, + struct nfs4_sequence_res *res, + int rpc_status) +{ + unsigned long timestamp; + struct nfs4_slot_table *tbl; + struct nfs4_slot *slot; + + /* + * sr_status remains 1 if an RPC level error occurred. The server + * may or may not have processed the sequence operation.. + * Proceed as if the server received and processed the sequence + * operation. + */ + if (res->sr_status == 1) + res->sr_status = NFS_OK; + + /* -ERESTARTSYS can result in skipping nfs41_sequence_setup */ + if (res->sr_slotid == NFS4_MAX_SLOT_TABLE) + goto out; + + tbl = &clp->cl_session->fc_slot_table; + slot = tbl->slots + res->sr_slotid; + + if (res->sr_status == 0) { + /* Update the slot's sequence and clientid lease timer */ + ++slot->seq_nr; + timestamp = res->sr_renewal_time; + spin_lock(&clp->cl_lock); + if (time_before(clp->cl_last_renewal, timestamp)) + clp->cl_last_renewal = timestamp; + spin_unlock(&clp->cl_lock); + return; + } +out: + /* The session may be reset by one of the error handlers. */ + dprintk("%s: Error %d free the slot \n", __func__, res->sr_status); + nfs41_sequence_free_slot(clp, res); +} + +/* + * nfs4_find_slot - efficiently look for a free slot + * + * nfs4_find_slot looks for an unset bit in the used_slots bitmap. + * If found, we mark the slot as used, update the highest_used_slotid, + * and respectively set up the sequence operation args. + * The slot number is returned if found, or NFS4_MAX_SLOT_TABLE otherwise. + * + * Note: must be called with under the slot_tbl_lock. 
+ */ +static u8 +nfs4_find_slot(struct nfs4_slot_table *tbl, struct rpc_task *task) +{ + int slotid; + u8 ret_id = NFS4_MAX_SLOT_TABLE; + BUILD_BUG_ON((u8)NFS4_MAX_SLOT_TABLE != (int)NFS4_MAX_SLOT_TABLE); + + dprintk("--> %s used_slots=%04lx highest_used=%d max_slots=%d\n", + __func__, tbl->used_slots[0], tbl->highest_used_slotid, + tbl->max_slots); + slotid = find_first_zero_bit(tbl->used_slots, tbl->max_slots); + if (slotid >= tbl->max_slots) + goto out; + __set_bit(slotid, tbl->used_slots); + if (slotid > tbl->highest_used_slotid) + tbl->highest_used_slotid = slotid; + ret_id = slotid; +out: + dprintk("<-- %s used_slots=%04lx highest_used=%d slotid=%d \n", + __func__, tbl->used_slots[0], tbl->highest_used_slotid, ret_id); + return ret_id; +} + +static int nfs4_recover_session(struct nfs4_session *session) +{ + struct nfs_client *clp = session->clp; + int ret; + + for (;;) { + ret = nfs4_wait_clnt_recover(clp); + if (ret != 0) + return ret; + if (!test_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state)) + break; + nfs4_schedule_state_manager(clp); + } + return 0; +} + +static int nfs41_setup_sequence(struct nfs4_session *session, + struct nfs4_sequence_args *args, + struct nfs4_sequence_res *res, + int cache_reply, + struct rpc_task *task) +{ + struct nfs4_slot *slot; + struct nfs4_slot_table *tbl; + int status = 0; + u8 slotid; + + dprintk("--> %s\n", __func__); + /* slot already allocated? */ + if (res->sr_slotid != NFS4_MAX_SLOT_TABLE) + return 0; + + memset(res, 0, sizeof(*res)); + res->sr_slotid = NFS4_MAX_SLOT_TABLE; + tbl = &session->fc_slot_table; + + spin_lock(&tbl->slot_tbl_lock); + if (test_bit(NFS4CLNT_SESSION_SETUP, &session->clp->cl_state)) { + if (tbl->highest_used_slotid != -1) { + rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL); + spin_unlock(&tbl->slot_tbl_lock); + dprintk("<-- %s: Session reset: draining\n", __func__); + return -EAGAIN; + } + + /* The slot table is empty; start the reset thread */ + dprintk("%s Session Reset\n", __func__); + spin_unlock(&tbl->slot_tbl_lock); + status = nfs4_recover_session(session); + if (status) + return status; + spin_lock(&tbl->slot_tbl_lock); + } + + slotid = nfs4_find_slot(tbl, task); + if (slotid == NFS4_MAX_SLOT_TABLE) { + rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL); + spin_unlock(&tbl->slot_tbl_lock); + dprintk("<-- %s: no free slots\n", __func__); + return -EAGAIN; + } + spin_unlock(&tbl->slot_tbl_lock); + + slot = tbl->slots + slotid; + args->sa_session = session; + args->sa_slotid = slotid; + args->sa_cache_this = cache_reply; + + dprintk("<-- %s slotid=%d seqid=%d\n", __func__, slotid, slot->seq_nr); + + res->sr_session = session; + res->sr_slotid = slotid; + res->sr_renewal_time = jiffies; + /* + * sr_status is only set in decode_sequence, and so will remain + * set to 1 if an rpc level failure occurs. 
+ */ + res->sr_status = 1; + return 0; +} + +int nfs4_setup_sequence(struct nfs_client *clp, + struct nfs4_sequence_args *args, + struct nfs4_sequence_res *res, + int cache_reply, + struct rpc_task *task) +{ + int ret = 0; + + dprintk("--> %s clp %p session %p sr_slotid %d\n", + __func__, clp, clp->cl_session, res->sr_slotid); + + if (!nfs4_has_session(clp)) + goto out; + ret = nfs41_setup_sequence(clp->cl_session, args, res, cache_reply, + task); + if (ret != -EAGAIN) { + /* terminate rpc task */ + task->tk_status = ret; + task->tk_action = NULL; + } +out: + dprintk("<-- %s status=%d\n", __func__, ret); + return ret; +} + +struct nfs41_call_sync_data { + struct nfs_client *clp; + struct nfs4_sequence_args *seq_args; + struct nfs4_sequence_res *seq_res; + int cache_reply; +}; + +static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata) +{ + struct nfs41_call_sync_data *data = calldata; + + dprintk("--> %s data->clp->cl_session %p\n", __func__, + data->clp->cl_session); + if (nfs4_setup_sequence(data->clp, data->seq_args, + data->seq_res, data->cache_reply, task)) + return; + rpc_call_start(task); +} + +static void nfs41_call_sync_done(struct rpc_task *task, void *calldata) +{ + struct nfs41_call_sync_data *data = calldata; + + nfs41_sequence_done(data->clp, data->seq_res, task->tk_status); + nfs41_sequence_free_slot(data->clp, data->seq_res); +} + +struct rpc_call_ops nfs41_call_sync_ops = { + .rpc_call_prepare = nfs41_call_sync_prepare, + .rpc_call_done = nfs41_call_sync_done, +}; + +static int nfs4_call_sync_sequence(struct nfs_client *clp, + struct rpc_clnt *clnt, + struct rpc_message *msg, + struct nfs4_sequence_args *args, + struct nfs4_sequence_res *res, + int cache_reply) +{ + int ret; + struct rpc_task *task; + struct nfs41_call_sync_data data = { + .clp = clp, + .seq_args = args, + .seq_res = res, + .cache_reply = cache_reply, + }; + struct rpc_task_setup task_setup = { + .rpc_client = clnt, + .rpc_message = msg, + .callback_ops = &nfs41_call_sync_ops, + .callback_data = &data + }; + + res->sr_slotid = NFS4_MAX_SLOT_TABLE; + task = rpc_run_task(&task_setup); + if (IS_ERR(task)) + ret = PTR_ERR(task); + else { + ret = task->tk_status; + rpc_put_task(task); + } + return ret; +} + +int _nfs4_call_sync_session(struct nfs_server *server, + struct rpc_message *msg, + struct nfs4_sequence_args *args, + struct nfs4_sequence_res *res, + int cache_reply) +{ + return nfs4_call_sync_sequence(server->nfs_client, server->client, + msg, args, res, cache_reply); +} + +#endif /* CONFIG_NFS_V4_1 */ + +int _nfs4_call_sync(struct nfs_server *server, + struct rpc_message *msg, + struct nfs4_sequence_args *args, + struct nfs4_sequence_res *res, + int cache_reply) +{ + args->sa_session = res->sr_session = NULL; + return rpc_call_sync(server->client, msg, 0); +} + +#define nfs4_call_sync(server, msg, args, res, cache_reply) \ + (server)->nfs_client->cl_call_sync((server), (msg), &(args)->seq_args, \ + &(res)->seq_res, (cache_reply)) + +static void nfs4_sequence_done(const struct nfs_server *server, + struct nfs4_sequence_res *res, int rpc_status) +{ +#ifdef CONFIG_NFS_V4_1 + if (nfs4_has_session(server->nfs_client)) + nfs41_sequence_done(server->nfs_client, res, rpc_status); +#endif /* CONFIG_NFS_V4_1 */ +} + +/* no restart, therefore free slot here */ +static void nfs4_sequence_done_free_slot(const struct nfs_server *server, + struct nfs4_sequence_res *res, + int rpc_status) +{ + nfs4_sequence_done(server, res, rpc_status); + nfs4_sequence_free_slot(server->nfs_client, res); +} + static 
void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo) { struct nfs_inode *nfsi = NFS_I(dir); @@ -312,6 +680,7 @@ static void nfs4_init_opendata_res(struct nfs4_opendata *p) p->o_res.server = p->o_arg.server; nfs_fattr_init(&p->f_attr); nfs_fattr_init(&p->dir_attr); + p->o_res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE; } static struct nfs4_opendata *nfs4_opendata_alloc(struct path *path, @@ -804,16 +1173,30 @@ int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state err = _nfs4_open_delegation_recall(ctx, state, stateid); switch (err) { case 0: - return err; + case -ENOENT: + case -ESTALE: + goto out; case -NFS4ERR_STALE_CLIENTID: case -NFS4ERR_STALE_STATEID: case -NFS4ERR_EXPIRED: /* Don't recall a delegation if it was lost */ nfs4_schedule_state_recovery(server->nfs_client); - return err; + goto out; + case -ERESTARTSYS: + /* + * The show must go on: exit, but mark the + * stateid as needing recovery. + */ + case -NFS4ERR_ADMIN_REVOKED: + case -NFS4ERR_BAD_STATEID: + nfs4_state_mark_reclaim_nograce(server->nfs_client, state); + case -ENOMEM: + err = 0; + goto out; } err = nfs4_handle_exception(server, err, &exception); } while (exception.retry); +out: return err; } @@ -929,6 +1312,10 @@ static void nfs4_open_prepare(struct rpc_task *task, void *calldata) nfs_copy_fh(&data->o_res.fh, data->o_arg.fh); } data->timestamp = jiffies; + if (nfs4_setup_sequence(data->o_arg.server->nfs_client, + &data->o_arg.seq_args, + &data->o_res.seq_res, 1, task)) + return; rpc_call_start(task); return; out_no_action: @@ -941,6 +1328,10 @@ static void nfs4_open_done(struct rpc_task *task, void *calldata) struct nfs4_opendata *data = calldata; data->rpc_status = task->tk_status; + + nfs4_sequence_done_free_slot(data->o_arg.server, &data->o_res.seq_res, + task->tk_status); + if (RPC_ASSASSINATED(task)) return; if (task->tk_status == 0) { @@ -1269,7 +1660,7 @@ static int _nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred, } else memcpy(&arg.stateid, &zero_stateid, sizeof(arg.stateid)); - status = rpc_call_sync(server->client, &msg, 0); + status = nfs4_call_sync(server, &msg, &arg, &res, 1); if (status == 0 && state != NULL) renew_lease(server, timestamp); return status; @@ -1318,6 +1709,7 @@ static void nfs4_close_done(struct rpc_task *task, void *data) struct nfs4_state *state = calldata->state; struct nfs_server *server = NFS_SERVER(calldata->inode); + nfs4_sequence_done(server, &calldata->res.seq_res, task->tk_status); if (RPC_ASSASSINATED(task)) return; /* hmm. 
we are done with the inode, and in the process of freeing @@ -1336,10 +1728,11 @@ static void nfs4_close_done(struct rpc_task *task, void *data) break; default: if (nfs4_async_handle_error(task, server, state) == -EAGAIN) { - rpc_restart_call(task); + nfs4_restart_rpc(task, server->nfs_client); return; } } + nfs4_sequence_free_slot(server->nfs_client, &calldata->res.seq_res); nfs_refresh_inode(calldata->inode, calldata->res.fattr); } @@ -1380,6 +1773,10 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data) calldata->arg.fmode = FMODE_WRITE; } calldata->timestamp = jiffies; + if (nfs4_setup_sequence((NFS_SERVER(calldata->inode))->nfs_client, + &calldata->arg.seq_args, &calldata->res.seq_res, + 1, task)) + return; rpc_call_start(task); } @@ -1419,13 +1816,15 @@ int nfs4_do_close(struct path *path, struct nfs4_state *state, int wait) }; int status = -ENOMEM; - calldata = kmalloc(sizeof(*calldata), GFP_KERNEL); + calldata = kzalloc(sizeof(*calldata), GFP_KERNEL); if (calldata == NULL) goto out; calldata->inode = state->inode; calldata->state = state; calldata->arg.fh = NFS_FH(state->inode); calldata->arg.stateid = &state->open_stateid; + if (nfs4_has_session(server->nfs_client)) + memset(calldata->arg.stateid->data, 0, 4); /* clear seqid */ /* Serialization for the sequence id */ calldata->arg.seqid = nfs_alloc_seqid(&state->owner->so_seqid); if (calldata->arg.seqid == NULL) @@ -1435,6 +1834,7 @@ int nfs4_do_close(struct path *path, struct nfs4_state *state, int wait) calldata->res.fattr = &calldata->fattr; calldata->res.seqid = calldata->arg.seqid; calldata->res.server = server; + calldata->res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE; calldata->path.mnt = mntget(path->mnt); calldata->path.dentry = dget(path->dentry); @@ -1584,15 +1984,18 @@ void nfs4_close_context(struct nfs_open_context *ctx, int is_sync) static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle) { + struct nfs4_server_caps_arg args = { + .fhandle = fhandle, + }; struct nfs4_server_caps_res res = {}; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS], - .rpc_argp = fhandle, + .rpc_argp = &args, .rpc_resp = &res, }; int status; - status = rpc_call_sync(server->client, &msg, 0); + status = nfs4_call_sync(server, &msg, &args, &res, 0); if (status == 0) { memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask)); if (res.attr_bitmask[0] & FATTR4_WORD0_ACL) @@ -1606,6 +2009,7 @@ static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *f server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY; server->acl_bitmask = res.acl_bitmask; } + return status; } @@ -1637,8 +2041,15 @@ static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle, .rpc_argp = &args, .rpc_resp = &res, }; + int status; + nfs_fattr_init(info->fattr); - return rpc_call_sync(server->client, &msg, 0); + status = nfs4_recover_expired_lease(server); + if (!status) + status = nfs4_check_client_ready(server->nfs_client); + if (!status) + status = nfs4_call_sync(server, &msg, &args, &res, 0); + return status; } static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle, @@ -1728,7 +2139,7 @@ static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, }; nfs_fattr_init(fattr); - return rpc_call_sync(server->client, &msg, 0); + return nfs4_call_sync(server, &msg, &args, &res, 0); } static int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, 
struct nfs_fattr *fattr) @@ -1812,7 +2223,7 @@ static int _nfs4_proc_lookupfh(struct nfs_server *server, const struct nfs_fh *d nfs_fattr_init(fattr); dprintk("NFS call lookupfh %s\n", name->name); - status = rpc_call_sync(server->client, &msg, 0); + status = nfs4_call_sync(server, &msg, &args, &res, 0); dprintk("NFS reply lookupfh: %d\n", status); return status; } @@ -1898,7 +2309,7 @@ static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry args.access |= NFS4_ACCESS_EXECUTE; } nfs_fattr_init(&fattr); - status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0); + status = nfs4_call_sync(server, &msg, &args, &res, 0); if (!status) { entry->mask = 0; if (res.access & NFS4_ACCESS_READ) @@ -1957,13 +2368,14 @@ static int _nfs4_proc_readlink(struct inode *inode, struct page *page, .pglen = pglen, .pages = &page, }; + struct nfs4_readlink_res res; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK], .rpc_argp = &args, - .rpc_resp = NULL, + .rpc_resp = &res, }; - return rpc_call_sync(NFS_CLIENT(inode), &msg, 0); + return nfs4_call_sync(NFS_SERVER(inode), &msg, &args, &res, 0); } static int nfs4_proc_readlink(struct inode *inode, struct page *page, @@ -2057,7 +2469,7 @@ static int _nfs4_proc_remove(struct inode *dir, struct qstr *name) int status; nfs_fattr_init(&res.dir_attr); - status = rpc_call_sync(server->client, &msg, 0); + status = nfs4_call_sync(server, &msg, &args, &res, 1); if (status == 0) { update_changeattr(dir, &res.cinfo); nfs_post_op_update_inode(dir, &res.dir_attr); @@ -2092,8 +2504,10 @@ static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir) { struct nfs_removeres *res = task->tk_msg.rpc_resp; + nfs4_sequence_done(res->server, &res->seq_res, task->tk_status); if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN) return 0; + nfs4_sequence_free_slot(res->server->nfs_client, &res->seq_res); update_changeattr(dir, &res->cinfo); nfs_post_op_update_inode(dir, &res->dir_attr); return 1; @@ -2125,7 +2539,7 @@ static int _nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name, nfs_fattr_init(res.old_fattr); nfs_fattr_init(res.new_fattr); - status = rpc_call_sync(server->client, &msg, 0); + status = nfs4_call_sync(server, &msg, &arg, &res, 1); if (!status) { update_changeattr(old_dir, &res.old_cinfo); @@ -2174,7 +2588,7 @@ static int _nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr * nfs_fattr_init(res.fattr); nfs_fattr_init(res.dir_attr); - status = rpc_call_sync(server->client, &msg, 0); + status = nfs4_call_sync(server, &msg, &arg, &res, 1); if (!status) { update_changeattr(dir, &res.cinfo); nfs_post_op_update_inode(dir, res.dir_attr); @@ -2235,7 +2649,8 @@ static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir, static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data) { - int status = rpc_call_sync(NFS_CLIENT(dir), &data->msg, 0); + int status = nfs4_call_sync(NFS_SERVER(dir), &data->msg, + &data->arg, &data->res, 1); if (status == 0) { update_changeattr(dir, &data->res.dir_cinfo); nfs_post_op_update_inode(dir, data->res.dir_fattr); @@ -2344,7 +2759,7 @@ static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred, (unsigned long long)cookie); nfs4_setup_readdir(cookie, NFS_COOKIEVERF(dir), dentry, &args); res.pgbase = args.pgbase; - status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0); + status = nfs4_call_sync(NFS_SERVER(dir), &msg, &args, &res, 0); if (status == 0) memcpy(NFS_COOKIEVERF(dir), res.verifier.data, 
NFS4_VERIFIER_SIZE); @@ -2422,14 +2837,17 @@ static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, .fh = fhandle, .bitmask = server->attr_bitmask, }; + struct nfs4_statfs_res res = { + .fsstat = fsstat, + }; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS], .rpc_argp = &args, - .rpc_resp = fsstat, + .rpc_resp = &res, }; nfs_fattr_init(fsstat->fattr); - return rpc_call_sync(server->client, &msg, 0); + return nfs4_call_sync(server, &msg, &args, &res, 0); } static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat) @@ -2451,13 +2869,16 @@ static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, .fh = fhandle, .bitmask = server->attr_bitmask, }; + struct nfs4_fsinfo_res res = { + .fsinfo = fsinfo, + }; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO], .rpc_argp = &args, - .rpc_resp = fsinfo, + .rpc_resp = &res, }; - return rpc_call_sync(server->client, &msg, 0); + return nfs4_call_sync(server, &msg, &args, &res, 0); } static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo) @@ -2486,10 +2907,13 @@ static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle .fh = fhandle, .bitmask = server->attr_bitmask, }; + struct nfs4_pathconf_res res = { + .pathconf = pathconf, + }; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF], .rpc_argp = &args, - .rpc_resp = pathconf, + .rpc_resp = &res, }; /* None of the pathconf attributes are mandatory to implement */ @@ -2499,7 +2923,7 @@ static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle } nfs_fattr_init(pathconf->fattr); - return rpc_call_sync(server->client, &msg, 0); + return nfs4_call_sync(server, &msg, &args, &res, 0); } static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle, @@ -2520,8 +2944,13 @@ static int nfs4_read_done(struct rpc_task *task, struct nfs_read_data *data) { struct nfs_server *server = NFS_SERVER(data->inode); + dprintk("--> %s\n", __func__); + + /* nfs4_sequence_free_slot called in the read rpc_call_done */ + nfs4_sequence_done(server, &data->res.seq_res, task->tk_status); + if (nfs4_async_handle_error(task, server, data->args.context->state) == -EAGAIN) { - rpc_restart_call(task); + nfs4_restart_rpc(task, server->nfs_client); return -EAGAIN; } @@ -2541,8 +2970,12 @@ static int nfs4_write_done(struct rpc_task *task, struct nfs_write_data *data) { struct inode *inode = data->inode; + /* slot is freed in nfs_writeback_done */ + nfs4_sequence_done(NFS_SERVER(inode), &data->res.seq_res, + task->tk_status); + if (nfs4_async_handle_error(task, NFS_SERVER(inode), data->args.context->state) == -EAGAIN) { - rpc_restart_call(task); + nfs4_restart_rpc(task, NFS_SERVER(inode)->nfs_client); return -EAGAIN; } if (task->tk_status >= 0) { @@ -2567,10 +3000,14 @@ static int nfs4_commit_done(struct rpc_task *task, struct nfs_write_data *data) { struct inode *inode = data->inode; + nfs4_sequence_done(NFS_SERVER(inode), &data->res.seq_res, + task->tk_status); if (nfs4_async_handle_error(task, NFS_SERVER(inode), NULL) == -EAGAIN) { - rpc_restart_call(task); + nfs4_restart_rpc(task, NFS_SERVER(inode)->nfs_client); return -EAGAIN; } + nfs4_sequence_free_slot(NFS_SERVER(inode)->nfs_client, + &data->res.seq_res); nfs_refresh_inode(inode, data->res.fattr); return 0; } @@ -2603,6 +3040,9 @@ static void nfs4_renew_done(struct rpc_task *task, void *data) if 
(time_before(clp->cl_last_renewal,timestamp)) clp->cl_last_renewal = timestamp; spin_unlock(&clp->cl_lock); + dprintk("%s calling put_rpccred on rpc_cred %p\n", __func__, + task->tk_msg.rpc_cred); + put_rpccred(task->tk_msg.rpc_cred); } static const struct rpc_call_ops nfs4_renew_ops = { @@ -2742,12 +3182,14 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu .acl_pages = pages, .acl_len = buflen, }; - size_t resp_len = buflen; + struct nfs_getaclres res = { + .acl_len = buflen, + }; void *resp_buf; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL], .rpc_argp = &args, - .rpc_resp = &resp_len, + .rpc_resp = &res, }; struct page *localpage = NULL; int ret; @@ -2761,26 +3203,26 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu return -ENOMEM; args.acl_pages[0] = localpage; args.acl_pgbase = 0; - resp_len = args.acl_len = PAGE_SIZE; + args.acl_len = PAGE_SIZE; } else { resp_buf = buf; buf_to_pages(buf, buflen, args.acl_pages, &args.acl_pgbase); } - ret = rpc_call_sync(NFS_CLIENT(inode), &msg, 0); + ret = nfs4_call_sync(NFS_SERVER(inode), &msg, &args, &res, 0); if (ret) goto out_free; - if (resp_len > args.acl_len) - nfs4_write_cached_acl(inode, NULL, resp_len); + if (res.acl_len > args.acl_len) + nfs4_write_cached_acl(inode, NULL, res.acl_len); else - nfs4_write_cached_acl(inode, resp_buf, resp_len); + nfs4_write_cached_acl(inode, resp_buf, res.acl_len); if (buf) { ret = -ERANGE; - if (resp_len > buflen) + if (res.acl_len > buflen) goto out_free; if (localpage) - memcpy(buf, resp_buf, resp_len); + memcpy(buf, resp_buf, res.acl_len); } - ret = resp_len; + ret = res.acl_len; out_free: if (localpage) __free_page(localpage); @@ -2810,8 +3252,6 @@ static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen) ret = nfs_revalidate_inode(server, inode); if (ret < 0) return ret; - if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL) - nfs_zap_acl_cache(inode); ret = nfs4_read_cached_acl(inode, buf, buflen); if (ret != -ENOENT) return ret; @@ -2827,10 +3267,11 @@ static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t bufl .acl_pages = pages, .acl_len = buflen, }; + struct nfs_setaclres res; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETACL], .rpc_argp = &arg, - .rpc_resp = NULL, + .rpc_resp = &res, }; int ret; @@ -2838,7 +3279,7 @@ static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t bufl return -EOPNOTSUPP; nfs_inode_return_delegation(inode); buf_to_pages(buf, buflen, arg.acl_pages, &arg.acl_pgbase); - ret = rpc_call_sync(NFS_CLIENT(inode), &msg, 0); + ret = nfs4_call_sync(server, &msg, &arg, &res, 1); nfs_access_zap_cache(inode); nfs_zap_acl_cache(inode); return ret; @@ -2857,10 +3298,8 @@ static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen } static int -nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, struct nfs4_state *state) +_nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, struct nfs_client *clp, struct nfs4_state *state) { - struct nfs_client *clp = server->nfs_client; - if (!clp || task->tk_status >= 0) return 0; switch(task->tk_status) { @@ -2879,8 +3318,23 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task); task->tk_status = 0; return -EAGAIN; +#if defined(CONFIG_NFS_V4_1) + case -NFS4ERR_BADSESSION: + case -NFS4ERR_BADSLOT: + case 
-NFS4ERR_BAD_HIGH_SLOT: + case -NFS4ERR_DEADSESSION: + case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: + case -NFS4ERR_SEQ_FALSE_RETRY: + case -NFS4ERR_SEQ_MISORDERED: + dprintk("%s ERROR %d, Reset session\n", __func__, + task->tk_status); + set_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state); + task->tk_status = 0; + return -EAGAIN; +#endif /* CONFIG_NFS_V4_1 */ case -NFS4ERR_DELAY: - nfs_inc_server_stats(server, NFSIOS_DELAY); + if (server) + nfs_inc_server_stats(server, NFSIOS_DELAY); case -NFS4ERR_GRACE: rpc_delay(task, NFS4_POLL_RETRY_MAX); task->tk_status = 0; @@ -2893,6 +3347,12 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, return 0; } +static int +nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, struct nfs4_state *state) +{ + return _nfs4_async_handle_error(task, server, server->nfs_client, state); +} + int nfs4_proc_setclientid(struct nfs_client *clp, u32 program, unsigned short port, struct rpc_cred *cred) { nfs4_verifier sc_verifier; @@ -3000,6 +3460,10 @@ struct nfs4_delegreturndata { static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata) { struct nfs4_delegreturndata *data = calldata; + + nfs4_sequence_done_free_slot(data->res.server, &data->res.seq_res, + task->tk_status); + data->rpc_status = task->tk_status; if (data->rpc_status == 0) renew_lease(data->res.server, data->timestamp); @@ -3010,7 +3474,25 @@ static void nfs4_delegreturn_release(void *calldata) kfree(calldata); } +#if defined(CONFIG_NFS_V4_1) +static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data) +{ + struct nfs4_delegreturndata *d_data; + + d_data = (struct nfs4_delegreturndata *)data; + + if (nfs4_setup_sequence(d_data->res.server->nfs_client, + &d_data->args.seq_args, + &d_data->res.seq_res, 1, task)) + return; + rpc_call_start(task); +} +#endif /* CONFIG_NFS_V4_1 */ + static const struct rpc_call_ops nfs4_delegreturn_ops = { +#if defined(CONFIG_NFS_V4_1) + .rpc_call_prepare = nfs4_delegreturn_prepare, +#endif /* CONFIG_NFS_V4_1 */ .rpc_call_done = nfs4_delegreturn_done, .rpc_release = nfs4_delegreturn_release, }; @@ -3032,7 +3514,7 @@ static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, co }; int status = 0; - data = kmalloc(sizeof(*data), GFP_KERNEL); + data = kzalloc(sizeof(*data), GFP_KERNEL); if (data == NULL) return -ENOMEM; data->args.fhandle = &data->fh; @@ -3042,6 +3524,7 @@ static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, co memcpy(&data->stateid, stateid, sizeof(data->stateid)); data->res.fattr = &data->fattr; data->res.server = server; + data->res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE; nfs_fattr_init(data->res.fattr); data->timestamp = jiffies; data->rpc_status = 0; @@ -3127,7 +3610,7 @@ static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock goto out; lsp = request->fl_u.nfs4_fl.owner; arg.lock_owner.id = lsp->ls_id.id; - status = rpc_call_sync(server->client, &msg, 0); + status = nfs4_call_sync(server, &msg, &arg, &res, 1); switch (status) { case 0: request->fl_type = F_UNLCK; @@ -3187,13 +3670,14 @@ static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl, struct nfs4_unlockdata *p; struct inode *inode = lsp->ls_state->inode; - p = kmalloc(sizeof(*p), GFP_KERNEL); + p = kzalloc(sizeof(*p), GFP_KERNEL); if (p == NULL) return NULL; p->arg.fh = NFS_FH(inode); p->arg.fl = &p->fl; p->arg.seqid = seqid; p->res.seqid = seqid; + p->res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE; p->arg.stateid = &lsp->ls_stateid; 
p->lsp = lsp; atomic_inc(&lsp->ls_count); @@ -3217,6 +3701,8 @@ static void nfs4_locku_done(struct rpc_task *task, void *data) { struct nfs4_unlockdata *calldata = data; + nfs4_sequence_done(calldata->server, &calldata->res.seq_res, + task->tk_status); if (RPC_ASSASSINATED(task)) return; switch (task->tk_status) { @@ -3233,8 +3719,11 @@ static void nfs4_locku_done(struct rpc_task *task, void *data) break; default: if (nfs4_async_handle_error(task, calldata->server, NULL) == -EAGAIN) - rpc_restart_call(task); + nfs4_restart_rpc(task, + calldata->server->nfs_client); } + nfs4_sequence_free_slot(calldata->server->nfs_client, + &calldata->res.seq_res); } static void nfs4_locku_prepare(struct rpc_task *task, void *data) @@ -3249,6 +3738,10 @@ static void nfs4_locku_prepare(struct rpc_task *task, void *data) return; } calldata->timestamp = jiffies; + if (nfs4_setup_sequence(calldata->server->nfs_client, + &calldata->arg.seq_args, + &calldata->res.seq_res, 1, task)) + return; rpc_call_start(task); } @@ -3341,6 +3834,7 @@ struct nfs4_lockdata { unsigned long timestamp; int rpc_status; int cancelled; + struct nfs_server *server; }; static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl, @@ -3366,7 +3860,9 @@ static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl, p->arg.lock_owner.clientid = server->nfs_client->cl_clientid; p->arg.lock_owner.id = lsp->ls_id.id; p->res.lock_seqid = p->arg.lock_seqid; + p->res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE; p->lsp = lsp; + p->server = server; atomic_inc(&lsp->ls_count); p->ctx = get_nfs_open_context(ctx); memcpy(&p->fl, fl, sizeof(p->fl)); @@ -3396,6 +3892,9 @@ static void nfs4_lock_prepare(struct rpc_task *task, void *calldata) } else data->arg.new_lock_owner = 0; data->timestamp = jiffies; + if (nfs4_setup_sequence(data->server->nfs_client, &data->arg.seq_args, + &data->res.seq_res, 1, task)) + return; rpc_call_start(task); dprintk("%s: done!, ret = %d\n", __func__, data->rpc_status); } @@ -3406,6 +3905,9 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata) dprintk("%s: begin!\n", __func__); + nfs4_sequence_done_free_slot(data->server, &data->res.seq_res, + task->tk_status); + data->rpc_status = task->tk_status; if (RPC_ASSASSINATED(task)) goto out; @@ -3487,8 +3989,6 @@ static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *f ret = nfs4_wait_for_completion_rpc_task(task); if (ret == 0) { ret = data->rpc_status; - if (ret == -NFS4ERR_DENIED) - ret = -EAGAIN; } else data->cancelled = 1; rpc_put_task(task); @@ -3576,9 +4076,11 @@ static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock * int err; do { + err = _nfs4_proc_setlk(state, cmd, request); + if (err == -NFS4ERR_DENIED) + err = -EAGAIN; err = nfs4_handle_exception(NFS_SERVER(state->inode), - _nfs4_proc_setlk(state, cmd, request), - &exception); + err, &exception); } while (exception.retry); return err; } @@ -3630,8 +4132,37 @@ int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl) goto out; do { err = _nfs4_do_setlk(state, F_SETLK, fl, 0); - if (err != -NFS4ERR_DELAY) - break; + switch (err) { + default: + printk(KERN_ERR "%s: unhandled error %d.\n", + __func__, err); + case 0: + case -ESTALE: + goto out; + case -NFS4ERR_EXPIRED: + case -NFS4ERR_STALE_CLIENTID: + case -NFS4ERR_STALE_STATEID: + nfs4_schedule_state_recovery(server->nfs_client); + goto out; + case -ERESTARTSYS: + /* + * The show must go on: exit, but mark the + * stateid as needing recovery. 
+ */ + case -NFS4ERR_ADMIN_REVOKED: + case -NFS4ERR_BAD_STATEID: + case -NFS4ERR_OPENMODE: + nfs4_state_mark_reclaim_nograce(server->nfs_client, state); + err = 0; + goto out; + case -ENOMEM: + case -NFS4ERR_DENIED: + /* kill_proc(fl->fl_pid, SIGLOST, 1); */ + err = 0; + goto out; + case -NFS4ERR_DELAY: + break; + } err = nfs4_handle_exception(server, err, &exception); } while (exception.retry); out: @@ -3706,10 +4237,13 @@ int nfs4_proc_fs_locations(struct inode *dir, const struct qstr *name, .page = page, .bitmask = bitmask, }; + struct nfs4_fs_locations_res res = { + .fs_locations = fs_locations, + }; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS], .rpc_argp = &args, - .rpc_resp = fs_locations, + .rpc_resp = &res, }; int status; @@ -3717,24 +4251,720 @@ int nfs4_proc_fs_locations(struct inode *dir, const struct qstr *name, nfs_fattr_init(&fs_locations->fattr); fs_locations->server = server; fs_locations->nlocations = 0; - status = rpc_call_sync(server->client, &msg, 0); + status = nfs4_call_sync(server, &msg, &args, &res, 0); nfs_fixup_referral_attributes(&fs_locations->fattr); dprintk("%s: returned status = %d\n", __func__, status); return status; } -struct nfs4_state_recovery_ops nfs4_reboot_recovery_ops = { +#ifdef CONFIG_NFS_V4_1 +/* + * nfs4_proc_exchange_id() + * + * Since the clientid has expired, all compounds using sessions + * associated with the stale clientid will be returning + * NFS4ERR_BADSESSION in the sequence operation, and will therefore + * be in some phase of session reset. + */ +static int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred) +{ + nfs4_verifier verifier; + struct nfs41_exchange_id_args args = { + .client = clp, + .flags = clp->cl_exchange_flags, + }; + struct nfs41_exchange_id_res res = { + .client = clp, + }; + int status; + struct rpc_message msg = { + .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID], + .rpc_argp = &args, + .rpc_resp = &res, + .rpc_cred = cred, + }; + __be32 *p; + + dprintk("--> %s\n", __func__); + BUG_ON(clp == NULL); + + p = (u32 *)verifier.data; + *p++ = htonl((u32)clp->cl_boot_time.tv_sec); + *p = htonl((u32)clp->cl_boot_time.tv_nsec); + args.verifier = &verifier; + + while (1) { + args.id_len = scnprintf(args.id, sizeof(args.id), + "%s/%s %u", + clp->cl_ipaddr, + rpc_peeraddr2str(clp->cl_rpcclient, + RPC_DISPLAY_ADDR), + clp->cl_id_uniquifier); + + status = rpc_call_sync(clp->cl_rpcclient, &msg, 0); + + if (status != NFS4ERR_CLID_INUSE) + break; + + if (signalled()) + break; + + if (++clp->cl_id_uniquifier == 0) + break; + } + + dprintk("<-- %s status= %d\n", __func__, status); + return status; +} + +struct nfs4_get_lease_time_data { + struct nfs4_get_lease_time_args *args; + struct nfs4_get_lease_time_res *res; + struct nfs_client *clp; +}; + +static void nfs4_get_lease_time_prepare(struct rpc_task *task, + void *calldata) +{ + int ret; + struct nfs4_get_lease_time_data *data = + (struct nfs4_get_lease_time_data *)calldata; + + dprintk("--> %s\n", __func__); + /* just setup sequence, do not trigger session recovery + since we're invoked within one */ + ret = nfs41_setup_sequence(data->clp->cl_session, + &data->args->la_seq_args, + &data->res->lr_seq_res, 0, task); + + BUG_ON(ret == -EAGAIN); + rpc_call_start(task); + dprintk("<-- %s\n", __func__); +} + +/* + * Called from nfs4_state_manager thread for session setup, so don't recover + * from sequence operation or clientid errors. 
+ */ +static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata) +{ + struct nfs4_get_lease_time_data *data = + (struct nfs4_get_lease_time_data *)calldata; + + dprintk("--> %s\n", __func__); + nfs41_sequence_done(data->clp, &data->res->lr_seq_res, task->tk_status); + switch (task->tk_status) { + case -NFS4ERR_DELAY: + case -NFS4ERR_GRACE: + dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status); + rpc_delay(task, NFS4_POLL_RETRY_MIN); + task->tk_status = 0; + nfs4_restart_rpc(task, data->clp); + return; + } + nfs41_sequence_free_slot(data->clp, &data->res->lr_seq_res); + dprintk("<-- %s\n", __func__); +} + +struct rpc_call_ops nfs4_get_lease_time_ops = { + .rpc_call_prepare = nfs4_get_lease_time_prepare, + .rpc_call_done = nfs4_get_lease_time_done, +}; + +int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo) +{ + struct rpc_task *task; + struct nfs4_get_lease_time_args args; + struct nfs4_get_lease_time_res res = { + .lr_fsinfo = fsinfo, + }; + struct nfs4_get_lease_time_data data = { + .args = &args, + .res = &res, + .clp = clp, + }; + struct rpc_message msg = { + .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GET_LEASE_TIME], + .rpc_argp = &args, + .rpc_resp = &res, + }; + struct rpc_task_setup task_setup = { + .rpc_client = clp->cl_rpcclient, + .rpc_message = &msg, + .callback_ops = &nfs4_get_lease_time_ops, + .callback_data = &data + }; + int status; + + res.lr_seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE; + dprintk("--> %s\n", __func__); + task = rpc_run_task(&task_setup); + + if (IS_ERR(task)) + status = PTR_ERR(task); + else { + status = task->tk_status; + rpc_put_task(task); + } + dprintk("<-- %s return %d\n", __func__, status); + + return status; +} + +/* + * Reset a slot table + */ +static int nfs4_reset_slot_table(struct nfs4_slot_table *tbl, int max_slots, + int old_max_slots, int ivalue) +{ + int i; + int ret = 0; + + dprintk("--> %s: max_reqs=%u, tbl %p\n", __func__, max_slots, tbl); + + /* + * Until we have dynamic slot table adjustment, insist + * upon the same slot table size + */ + if (max_slots != old_max_slots) { + dprintk("%s reset slot table does't match old\n", + __func__); + ret = -EINVAL; /*XXX NFS4ERR_REQ_TOO_BIG ? 
*/ + goto out; + } + spin_lock(&tbl->slot_tbl_lock); + for (i = 0; i < max_slots; ++i) + tbl->slots[i].seq_nr = ivalue; + tbl->highest_used_slotid = -1; + spin_unlock(&tbl->slot_tbl_lock); + dprintk("%s: tbl=%p slots=%p max_slots=%d\n", __func__, + tbl, tbl->slots, tbl->max_slots); +out: + dprintk("<-- %s: return %d\n", __func__, ret); + return ret; +} + +/* + * Reset the forechannel and backchannel slot tables + */ +static int nfs4_reset_slot_tables(struct nfs4_session *session) +{ + int status; + + status = nfs4_reset_slot_table(&session->fc_slot_table, + session->fc_attrs.max_reqs, + session->fc_slot_table.max_slots, + 1); + if (status) + return status; + + status = nfs4_reset_slot_table(&session->bc_slot_table, + session->bc_attrs.max_reqs, + session->bc_slot_table.max_slots, + 0); + return status; +} + +/* Destroy the slot table */ +static void nfs4_destroy_slot_tables(struct nfs4_session *session) +{ + if (session->fc_slot_table.slots != NULL) { + kfree(session->fc_slot_table.slots); + session->fc_slot_table.slots = NULL; + } + if (session->bc_slot_table.slots != NULL) { + kfree(session->bc_slot_table.slots); + session->bc_slot_table.slots = NULL; + } + return; +} + +/* + * Initialize slot table + */ +static int nfs4_init_slot_table(struct nfs4_slot_table *tbl, + int max_slots, int ivalue) +{ + int i; + struct nfs4_slot *slot; + int ret = -ENOMEM; + + BUG_ON(max_slots > NFS4_MAX_SLOT_TABLE); + + dprintk("--> %s: max_reqs=%u\n", __func__, max_slots); + + slot = kcalloc(max_slots, sizeof(struct nfs4_slot), GFP_KERNEL); + if (!slot) + goto out; + for (i = 0; i < max_slots; ++i) + slot[i].seq_nr = ivalue; + ret = 0; + + spin_lock(&tbl->slot_tbl_lock); + if (tbl->slots != NULL) { + spin_unlock(&tbl->slot_tbl_lock); + dprintk("%s: slot table already initialized. tbl=%p slots=%p\n", + __func__, tbl, tbl->slots); + WARN_ON(1); + goto out_free; + } + tbl->max_slots = max_slots; + tbl->slots = slot; + tbl->highest_used_slotid = -1; /* no slot is currently used */ + spin_unlock(&tbl->slot_tbl_lock); + dprintk("%s: tbl=%p slots=%p max_slots=%d\n", __func__, + tbl, tbl->slots, tbl->max_slots); +out: + dprintk("<-- %s: return %d\n", __func__, ret); + return ret; + +out_free: + kfree(slot); + goto out; +} + +/* + * Initialize the forechannel and backchannel tables + */ +static int nfs4_init_slot_tables(struct nfs4_session *session) +{ + int status; + + status = nfs4_init_slot_table(&session->fc_slot_table, + session->fc_attrs.max_reqs, 1); + if (status) + return status; + + status = nfs4_init_slot_table(&session->bc_slot_table, + session->bc_attrs.max_reqs, 0); + if (status) + nfs4_destroy_slot_tables(session); + + return status; +} + +struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp) +{ + struct nfs4_session *session; + struct nfs4_slot_table *tbl; + + session = kzalloc(sizeof(struct nfs4_session), GFP_KERNEL); + if (!session) + return NULL; + + set_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state); + /* + * The create session reply races with the server back + * channel probe. 
Mark the client NFS_CS_SESSION_INITING + * so that the client back channel can find the + * nfs_client struct + */ + clp->cl_cons_state = NFS_CS_SESSION_INITING; + + tbl = &session->fc_slot_table; + spin_lock_init(&tbl->slot_tbl_lock); + rpc_init_wait_queue(&tbl->slot_tbl_waitq, "ForeChannel Slot table"); + + tbl = &session->bc_slot_table; + spin_lock_init(&tbl->slot_tbl_lock); + rpc_init_wait_queue(&tbl->slot_tbl_waitq, "BackChannel Slot table"); + + session->clp = clp; + return session; +} + +void nfs4_destroy_session(struct nfs4_session *session) +{ + nfs4_proc_destroy_session(session); + dprintk("%s Destroy backchannel for xprt %p\n", + __func__, session->clp->cl_rpcclient->cl_xprt); + xprt_destroy_backchannel(session->clp->cl_rpcclient->cl_xprt, + NFS41_BC_MIN_CALLBACKS); + nfs4_destroy_slot_tables(session); + kfree(session); +} + +/* + * Initialize the values to be used by the client in CREATE_SESSION + * If nfs4_init_session set the fore channel request and response sizes, + * use them. + * + * Set the back channel max_resp_sz_cached to zero to force the client to + * always set csa_cachethis to FALSE because the current implementation + * of the back channel DRC only supports caching the CB_SEQUENCE operation. + */ +static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args) +{ + struct nfs4_session *session = args->client->cl_session; + unsigned int mxrqst_sz = session->fc_attrs.max_rqst_sz, + mxresp_sz = session->fc_attrs.max_resp_sz; + + if (mxrqst_sz == 0) + mxrqst_sz = NFS_MAX_FILE_IO_SIZE; + if (mxresp_sz == 0) + mxresp_sz = NFS_MAX_FILE_IO_SIZE; + /* Fore channel attributes */ + args->fc_attrs.headerpadsz = 0; + args->fc_attrs.max_rqst_sz = mxrqst_sz; + args->fc_attrs.max_resp_sz = mxresp_sz; + args->fc_attrs.max_resp_sz_cached = mxresp_sz; + args->fc_attrs.max_ops = NFS4_MAX_OPS; + args->fc_attrs.max_reqs = session->clp->cl_rpcclient->cl_xprt->max_reqs; + + dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u " + "max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n", + __func__, + args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz, + args->fc_attrs.max_resp_sz_cached, args->fc_attrs.max_ops, + args->fc_attrs.max_reqs); + + /* Back channel attributes */ + args->bc_attrs.headerpadsz = 0; + args->bc_attrs.max_rqst_sz = PAGE_SIZE; + args->bc_attrs.max_resp_sz = PAGE_SIZE; + args->bc_attrs.max_resp_sz_cached = 0; + args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS; + args->bc_attrs.max_reqs = 1; + + dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u " + "max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n", + __func__, + args->bc_attrs.max_rqst_sz, args->bc_attrs.max_resp_sz, + args->bc_attrs.max_resp_sz_cached, args->bc_attrs.max_ops, + args->bc_attrs.max_reqs); +} + +static int _verify_channel_attr(char *chan, char *attr_name, u32 sent, u32 rcvd) +{ + if (rcvd <= sent) + return 0; + printk(KERN_WARNING "%s: Session INVALID: %s channel %s increased. " + "sent=%u rcvd=%u\n", __func__, chan, attr_name, sent, rcvd); + return -EINVAL; +} + +#define _verify_fore_channel_attr(_name_) \ + _verify_channel_attr("fore", #_name_, \ + args->fc_attrs._name_, \ + session->fc_attrs._name_) + +#define _verify_back_channel_attr(_name_) \ + _verify_channel_attr("back", #_name_, \ + args->bc_attrs._name_, \ + session->bc_attrs._name_) + +/* + * The server is not allowed to increase the fore channel header pad size, + * maximum response size, or maximum number of operations. 
+ * + * The back channel attributes are only negotiatied down: We send what the + * (back channel) server insists upon. + */ +static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args, + struct nfs4_session *session) +{ + int ret = 0; + + ret |= _verify_fore_channel_attr(headerpadsz); + ret |= _verify_fore_channel_attr(max_resp_sz); + ret |= _verify_fore_channel_attr(max_ops); + + ret |= _verify_back_channel_attr(headerpadsz); + ret |= _verify_back_channel_attr(max_rqst_sz); + ret |= _verify_back_channel_attr(max_resp_sz); + ret |= _verify_back_channel_attr(max_resp_sz_cached); + ret |= _verify_back_channel_attr(max_ops); + ret |= _verify_back_channel_attr(max_reqs); + + return ret; +} + +static int _nfs4_proc_create_session(struct nfs_client *clp) +{ + struct nfs4_session *session = clp->cl_session; + struct nfs41_create_session_args args = { + .client = clp, + .cb_program = NFS4_CALLBACK, + }; + struct nfs41_create_session_res res = { + .client = clp, + }; + struct rpc_message msg = { + .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION], + .rpc_argp = &args, + .rpc_resp = &res, + }; + int status; + + nfs4_init_channel_attrs(&args); + args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN); + + status = rpc_call_sync(session->clp->cl_rpcclient, &msg, 0); + + if (!status) + /* Verify the session's negotiated channel_attrs values */ + status = nfs4_verify_channel_attrs(&args, session); + if (!status) { + /* Increment the clientid slot sequence id */ + clp->cl_seqid++; + } + + return status; +} + +/* + * Issues a CREATE_SESSION operation to the server. + * It is the responsibility of the caller to verify the session is + * expired before calling this routine. + */ +int nfs4_proc_create_session(struct nfs_client *clp, int reset) +{ + int status; + unsigned *ptr; + struct nfs_fsinfo fsinfo; + struct nfs4_session *session = clp->cl_session; + + dprintk("--> %s clp=%p session=%p\n", __func__, clp, session); + + status = _nfs4_proc_create_session(clp); + if (status) + goto out; + + /* Init or reset the fore channel */ + if (reset) + status = nfs4_reset_slot_tables(session); + else + status = nfs4_init_slot_tables(session); + dprintk("fore channel slot table initialization returned %d\n", status); + if (status) + goto out; + + ptr = (unsigned *)&session->sess_id.data[0]; + dprintk("%s client>seqid %d sessionid %u:%u:%u:%u\n", __func__, + clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]); + + if (reset) + /* Lease time is aleady set */ + goto out; + + /* Get the lease time */ + status = nfs4_proc_get_lease_time(clp, &fsinfo); + if (status == 0) { + /* Update lease time and schedule renewal */ + spin_lock(&clp->cl_lock); + clp->cl_lease_time = fsinfo.lease_time * HZ; + clp->cl_last_renewal = jiffies; + clear_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state); + spin_unlock(&clp->cl_lock); + + nfs4_schedule_state_renewal(clp); + } +out: + dprintk("<-- %s\n", __func__); + return status; +} + +/* + * Issue the over-the-wire RPC DESTROY_SESSION. + * The caller must serialize access to this routine. 
+ */ +int nfs4_proc_destroy_session(struct nfs4_session *session) +{ + int status = 0; + struct rpc_message msg; + + dprintk("--> nfs4_proc_destroy_session\n"); + + /* session is still being setup */ + if (session->clp->cl_cons_state != NFS_CS_READY) + return status; + + msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION]; + msg.rpc_argp = session; + msg.rpc_resp = NULL; + msg.rpc_cred = NULL; + status = rpc_call_sync(session->clp->cl_rpcclient, &msg, 0); + + if (status) + printk(KERN_WARNING + "Got error %d from the server on DESTROY_SESSION. " + "Session has been destroyed regardless...\n", status); + + dprintk("<-- nfs4_proc_destroy_session\n"); + return status; +} + +/* + * Renew the cl_session lease. + */ +static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred) +{ + struct nfs4_sequence_args args; + struct nfs4_sequence_res res; + + struct rpc_message msg = { + .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE], + .rpc_argp = &args, + .rpc_resp = &res, + .rpc_cred = cred, + }; + + args.sa_cache_this = 0; + + return nfs4_call_sync_sequence(clp, clp->cl_rpcclient, &msg, &args, + &res, 0); +} + +void nfs41_sequence_call_done(struct rpc_task *task, void *data) +{ + struct nfs_client *clp = (struct nfs_client *)data; + + nfs41_sequence_done(clp, task->tk_msg.rpc_resp, task->tk_status); + + if (task->tk_status < 0) { + dprintk("%s ERROR %d\n", __func__, task->tk_status); + + if (_nfs4_async_handle_error(task, NULL, clp, NULL) + == -EAGAIN) { + nfs4_restart_rpc(task, clp); + return; + } + } + nfs41_sequence_free_slot(clp, task->tk_msg.rpc_resp); + dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred); + + put_rpccred(task->tk_msg.rpc_cred); + kfree(task->tk_msg.rpc_argp); + kfree(task->tk_msg.rpc_resp); + + dprintk("<-- %s\n", __func__); +} + +static void nfs41_sequence_prepare(struct rpc_task *task, void *data) +{ + struct nfs_client *clp; + struct nfs4_sequence_args *args; + struct nfs4_sequence_res *res; + + clp = (struct nfs_client *)data; + args = task->tk_msg.rpc_argp; + res = task->tk_msg.rpc_resp; + + if (nfs4_setup_sequence(clp, args, res, 0, task)) + return; + rpc_call_start(task); +} + +static const struct rpc_call_ops nfs41_sequence_ops = { + .rpc_call_done = nfs41_sequence_call_done, + .rpc_call_prepare = nfs41_sequence_prepare, +}; + +static int nfs41_proc_async_sequence(struct nfs_client *clp, + struct rpc_cred *cred) +{ + struct nfs4_sequence_args *args; + struct nfs4_sequence_res *res; + struct rpc_message msg = { + .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE], + .rpc_cred = cred, + }; + + args = kzalloc(sizeof(*args), GFP_KERNEL); + if (!args) + return -ENOMEM; + res = kzalloc(sizeof(*res), GFP_KERNEL); + if (!res) { + kfree(args); + return -ENOMEM; + } + res->sr_slotid = NFS4_MAX_SLOT_TABLE; + msg.rpc_argp = args; + msg.rpc_resp = res; + + return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_SOFT, + &nfs41_sequence_ops, (void *)clp); +} + +#endif /* CONFIG_NFS_V4_1 */ + +struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = { .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT, .state_flag_bit = NFS_STATE_RECLAIM_REBOOT, .recover_open = nfs4_open_reclaim, .recover_lock = nfs4_lock_reclaim, + .establish_clid = nfs4_init_clientid, + .get_clid_cred = nfs4_get_setclientid_cred, +}; + +#if defined(CONFIG_NFS_V4_1) +struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = { + .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT, + .state_flag_bit = NFS_STATE_RECLAIM_REBOOT, + .recover_open = nfs4_open_reclaim, + .recover_lock = 
nfs4_lock_reclaim, + .establish_clid = nfs4_proc_exchange_id, + .get_clid_cred = nfs4_get_exchange_id_cred, +}; +#endif /* CONFIG_NFS_V4_1 */ + +struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = { + .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE, + .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE, + .recover_open = nfs4_open_expired, + .recover_lock = nfs4_lock_expired, + .establish_clid = nfs4_init_clientid, + .get_clid_cred = nfs4_get_setclientid_cred, }; -struct nfs4_state_recovery_ops nfs4_nograce_recovery_ops = { +#if defined(CONFIG_NFS_V4_1) +struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = { .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE, .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE, .recover_open = nfs4_open_expired, .recover_lock = nfs4_lock_expired, + .establish_clid = nfs4_proc_exchange_id, + .get_clid_cred = nfs4_get_exchange_id_cred, +}; +#endif /* CONFIG_NFS_V4_1 */ + +struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = { + .sched_state_renewal = nfs4_proc_async_renew, + .get_state_renewal_cred_locked = nfs4_get_renew_cred_locked, + .renew_lease = nfs4_proc_renew, +}; + +#if defined(CONFIG_NFS_V4_1) +struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = { + .sched_state_renewal = nfs41_proc_async_sequence, + .get_state_renewal_cred_locked = nfs4_get_machine_cred_locked, + .renew_lease = nfs4_proc_sequence, +}; +#endif + +/* + * Per minor version reboot and network partition recovery ops + */ + +struct nfs4_state_recovery_ops *nfs4_reboot_recovery_ops[] = { + &nfs40_reboot_recovery_ops, +#if defined(CONFIG_NFS_V4_1) + &nfs41_reboot_recovery_ops, +#endif +}; + +struct nfs4_state_recovery_ops *nfs4_nograce_recovery_ops[] = { + &nfs40_nograce_recovery_ops, +#if defined(CONFIG_NFS_V4_1) + &nfs41_nograce_recovery_ops, +#endif +}; + +struct nfs4_state_maintenance_ops *nfs4_state_renewal_ops[] = { + &nfs40_state_renewal_ops, +#if defined(CONFIG_NFS_V4_1) + &nfs41_state_renewal_ops, +#endif }; static const struct inode_operations nfs4_file_inode_operations = { |
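The comments above nfs4_find_slot() and nfs4_free_slot() describe the slot table as a bitmap plus a cached highest_used_slotid that is scanned downward on free. A minimal userspace model of that logic, assuming a single 32-bit word for the bitmap and ignoring the kernel's locking, RPC wait queue, and find_first_zero_bit()/find_last_bit() helpers (all names below are stand-ins, not the kernel API):

/* Toy model of the NFSv4.1 forechannel slot table bookkeeping. */
#include <stdio.h>

#define MAX_SLOTS 16              /* stands in for NFS4_MAX_SLOT_TABLE */

struct slot_table {
	unsigned int used;        /* one bit per slot, bit set => in use */
	int max_slots;
	int highest_used_slotid;  /* -1 when no slot is in use */
};

/* Allocate the lowest free slot, mirroring nfs4_find_slot(). */
static int slot_alloc(struct slot_table *tbl)
{
	for (int slotid = 0; slotid < tbl->max_slots; slotid++) {
		if (!(tbl->used & (1u << slotid))) {
			tbl->used |= 1u << slotid;
			if (slotid > tbl->highest_used_slotid)
				tbl->highest_used_slotid = slotid;
			return slotid;
		}
	}
	return -1;                /* table full: caller would sleep on the waitq */
}

/*
 * Free a slot and, if it was the highest one in use, scan down past any
 * holes for the new highest used slot id, mirroring nfs4_free_slot().
 */
static void slot_free(struct slot_table *tbl, int slotid)
{
	tbl->used &= ~(1u << slotid);
	if (slotid == tbl->highest_used_slotid) {
		int i;
		for (i = tbl->max_slots - 1; i >= 0; i--)
			if (tbl->used & (1u << i))
				break;
		tbl->highest_used_slotid = i;   /* -1 if the table is empty */
	}
}

int main(void)
{
	struct slot_table tbl = { .used = 0, .max_slots = MAX_SLOTS,
				  .highest_used_slotid = -1 };
	int a = slot_alloc(&tbl), b = slot_alloc(&tbl), c = slot_alloc(&tbl);

	printf("allocated %d %d %d, highest=%d\n", a, b, c,
	       tbl.highest_used_slotid);
	slot_free(&tbl, b);       /* hole in the bitmap, highest stays put */
	printf("freed %d, highest=%d\n", b, tbl.highest_used_slotid);
	slot_free(&tbl, c);       /* highest drops past the hole */
	printf("freed %d, highest=%d\n", c, tbl.highest_used_slotid);
	slot_free(&tbl, a);       /* table empty again, highest goes to -1 */
	printf("freed %d, highest=%d\n", a, tbl.highest_used_slotid);
	return 0;
}

The downward scan on free is what lets highest_used_slotid fall back toward -1, which the draining check in nfs41_setup_sequence() (highest_used_slotid != -1 while NFS4CLNT_SESSION_SETUP is set) relies on before resetting the session.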