Diffstat (limited to 'net')
-rw-r--r--  net/sunrpc/auth.c                   6
-rw-r--r--  net/sunrpc/auth_gss/auth_gss.c     18
-rw-r--r--  net/sunrpc/clnt.c                 205
-rw-r--r--  net/sunrpc/pmap_clnt.c              9
-rw-r--r--  net/sunrpc/sched.c                 84
-rw-r--r--  net/sunrpc/sunrpc_syms.c            6
-rw-r--r--  net/sunrpc/svc.c                   36
-rw-r--r--  net/sunrpc/xdr.c                  298
-rw-r--r--  net/sunrpc/xprt.c                  71
9 files changed, 593 insertions(+), 140 deletions(-)
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index 9bcec9b927b..505e2d4b3d6 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -66,10 +66,10 @@ rpcauth_create(rpc_authflavor_t pseudoflavor, struct rpc_clnt *clnt)
u32 flavor = pseudoflavor_to_flavor(pseudoflavor);
if (flavor >= RPC_AUTH_MAXFLAVOR || !(ops = auth_flavors[flavor]))
- return NULL;
+ return ERR_PTR(-EINVAL);
auth = ops->create(clnt, pseudoflavor);
- if (!auth)
- return NULL;
+ if (IS_ERR(auth))
+ return auth;
if (clnt->cl_auth)
rpcauth_destroy(clnt->cl_auth);
clnt->cl_auth = auth;
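rpcauth_create() now reports failures through ERR_PTR() rather than NULL, so callers switch from NULL checks to IS_ERR()/PTR_ERR(); a minimal, hypothetical caller sketch (not part of the patch):

	#include <linux/err.h>
	#include <linux/sunrpc/clnt.h>
	#include <linux/sunrpc/auth.h>

	/* Hypothetical caller: propagate the encoded error instead of assuming -ENOMEM. */
	static int example_set_auth(struct rpc_clnt *clnt, rpc_authflavor_t flavor)
	{
		struct rpc_auth *auth;

		auth = rpcauth_create(flavor, clnt);
		if (IS_ERR(auth))
			return PTR_ERR(auth);	/* e.g. -EINVAL for an unknown flavor */
		return 0;
	}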
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index a33b627cbef..2f7b867161d 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -660,14 +660,16 @@ gss_create(struct rpc_clnt *clnt, rpc_authflavor_t flavor)
{
struct gss_auth *gss_auth;
struct rpc_auth * auth;
+ int err = -ENOMEM; /* XXX? */
dprintk("RPC: creating GSS authenticator for client %p\n",clnt);
if (!try_module_get(THIS_MODULE))
- return NULL;
+ return ERR_PTR(err);
if (!(gss_auth = kmalloc(sizeof(*gss_auth), GFP_KERNEL)))
goto out_dec;
gss_auth->client = clnt;
+ err = -EINVAL;
gss_auth->mech = gss_mech_get_by_pseudoflavor(flavor);
if (!gss_auth->mech) {
printk(KERN_WARNING "%s: Pseudoflavor %d not found!",
@@ -675,9 +677,8 @@ gss_create(struct rpc_clnt *clnt, rpc_authflavor_t flavor)
goto err_free;
}
gss_auth->service = gss_pseudoflavor_to_service(gss_auth->mech, flavor);
- /* FIXME: Will go away once privacy support is merged in */
- if (gss_auth->service == RPC_GSS_SVC_PRIVACY)
- gss_auth->service = RPC_GSS_SVC_INTEGRITY;
+ if (gss_auth->service == 0)
+ goto err_put_mech;
INIT_LIST_HEAD(&gss_auth->upcalls);
spin_lock_init(&gss_auth->lock);
auth = &gss_auth->rpc_auth;
@@ -687,15 +688,18 @@ gss_create(struct rpc_clnt *clnt, rpc_authflavor_t flavor)
auth->au_flavor = flavor;
atomic_set(&auth->au_count, 1);
- if (rpcauth_init_credcache(auth, GSS_CRED_EXPIRE) < 0)
+ err = rpcauth_init_credcache(auth, GSS_CRED_EXPIRE);
+ if (err)
goto err_put_mech;
snprintf(gss_auth->path, sizeof(gss_auth->path), "%s/%s",
clnt->cl_pathname,
gss_auth->mech->gm_name);
gss_auth->dentry = rpc_mkpipe(gss_auth->path, clnt, &gss_upcall_ops, RPC_PIPE_WAIT_FOR_OPEN);
- if (IS_ERR(gss_auth->dentry))
+ if (IS_ERR(gss_auth->dentry)) {
+ err = PTR_ERR(gss_auth->dentry);
goto err_put_mech;
+ }
return auth;
err_put_mech:
@@ -704,7 +708,7 @@ err_free:
kfree(gss_auth);
out_dec:
module_put(THIS_MODULE);
- return NULL;
+ return ERR_PTR(err);
}
static void
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 02bc029d46f..f17e6153b68 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -97,12 +97,13 @@ rpc_setup_pipedir(struct rpc_clnt *clnt, char *dir_name)
* made to sleep too long.
*/
struct rpc_clnt *
-rpc_create_client(struct rpc_xprt *xprt, char *servname,
+rpc_new_client(struct rpc_xprt *xprt, char *servname,
struct rpc_program *program, u32 vers,
rpc_authflavor_t flavor)
{
struct rpc_version *version;
struct rpc_clnt *clnt = NULL;
+ struct rpc_auth *auth;
int err;
int len;
@@ -157,10 +158,11 @@ rpc_create_client(struct rpc_xprt *xprt, char *servname,
if (err < 0)
goto out_no_path;
- err = -ENOMEM;
- if (!rpcauth_create(flavor, clnt)) {
+ auth = rpcauth_create(flavor, clnt);
+ if (IS_ERR(auth)) {
printk(KERN_INFO "RPC: Couldn't create auth handle (flavor %u)\n",
flavor);
+ err = PTR_ERR(auth);
goto out_no_auth;
}
@@ -178,6 +180,37 @@ out_no_path:
kfree(clnt->cl_server);
kfree(clnt);
out_err:
+ xprt_destroy(xprt);
+ return ERR_PTR(err);
+}
+
+/**
+ * Create an RPC client
+ * @xprt - pointer to xprt struct
+ * @servname - name of server
+ * @info - rpc_program
+ * @version - rpc_program version
+ * @authflavor - rpc_auth flavour to use
+ *
+ * Creates an RPC client structure, then pings the server in order to
+ * determine if it is up, and if it supports this program and version.
+ *
+ * This function should never be called by asynchronous tasks such as
+ * the portmapper.
+ */
+struct rpc_clnt *rpc_create_client(struct rpc_xprt *xprt, char *servname,
+ struct rpc_program *info, u32 version, rpc_authflavor_t authflavor)
+{
+ struct rpc_clnt *clnt;
+ int err;
+
+ clnt = rpc_new_client(xprt, servname, info, version, authflavor);
+ if (IS_ERR(clnt))
+ return clnt;
+ err = rpc_ping(clnt, RPC_TASK_SOFT|RPC_TASK_NOINTR);
+ if (err == 0)
+ return clnt;
+ rpc_shutdown_client(clnt);
return ERR_PTR(err);
}
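rpc_create_client() now both builds the client and pings the server with the NULL procedure, so one IS_ERR() check covers both steps; a hypothetical caller sketch (server name and program pointer are illustrative):

	#include <linux/err.h>
	#include <linux/sunrpc/clnt.h>

	/* Hypothetical caller; on failure the transport has already been consumed
	 * by rpc_new_client()/rpc_shutdown_client(), so no extra cleanup is needed. */
	static int example_setup_client(struct rpc_xprt *xprt, struct rpc_program *prog,
					u32 vers, struct rpc_clnt **out)
	{
		struct rpc_clnt *clnt;

		clnt = rpc_create_client(xprt, "example-server", prog, vers, RPC_AUTH_UNIX);
		if (IS_ERR(clnt))
			return PTR_ERR(clnt);	/* e.g. -EPROTONOSUPPORT from the ping */
		*out = clnt;
		return 0;
	}

This is also why pmap_create() below switches to rpc_new_client(): the portmapper client runs from asynchronous context and must not ping, and it no longer needs its own xprt_destroy() on failure.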
@@ -208,6 +241,8 @@ rpc_clone_client(struct rpc_clnt *clnt)
rpc_init_rtt(&new->cl_rtt_default, clnt->cl_xprt->timeout.to_initval);
if (new->cl_auth)
atomic_inc(&new->cl_auth->au_count);
+ new->cl_pmap = &new->cl_pmap_default;
+ rpc_init_wait_queue(&new->cl_pmap_default.pm_bindwait, "bindwait");
return new;
out_no_clnt:
printk(KERN_INFO "RPC: out of memory in %s\n", __FUNCTION__);
@@ -296,6 +331,44 @@ rpc_release_client(struct rpc_clnt *clnt)
rpc_destroy_client(clnt);
}
+/**
+ * rpc_bind_new_program - bind a new RPC program to an existing client
+ * @old - old rpc_client
+ * @program - rpc program to set
+ * @vers - rpc program version
+ *
+ * Clones the rpc client and sets up a new RPC program. This is mainly
+ * of use for enabling different RPC programs to share the same transport.
+ * The Sun NFSv2/v3 ACL protocol can do this.
+ */
+struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old,
+ struct rpc_program *program,
+ int vers)
+{
+ struct rpc_clnt *clnt;
+ struct rpc_version *version;
+ int err;
+
+ BUG_ON(vers >= program->nrvers || !program->version[vers]);
+ version = program->version[vers];
+ clnt = rpc_clone_client(old);
+ if (IS_ERR(clnt))
+ goto out;
+ clnt->cl_procinfo = version->procs;
+ clnt->cl_maxproc = version->nrprocs;
+ clnt->cl_protname = program->name;
+ clnt->cl_prog = program->number;
+ clnt->cl_vers = version->number;
+ clnt->cl_stats = program->stats;
+ err = rpc_ping(clnt, RPC_TASK_SOFT|RPC_TASK_NOINTR);
+ if (err != 0) {
+ rpc_shutdown_client(clnt);
+ clnt = ERR_PTR(err);
+ }
+out:
+ return clnt;
+}
+
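rpc_bind_new_program() exists for side protocols such as the Sun NFSv2/v3 ACL protocol, which reuse an existing client's transport; a hypothetical sketch (the program definition is illustrative):

	#include <linux/sunrpc/clnt.h>

	/* Hypothetical secondary program sharing the transport of an existing client;
	 * example_acl_program stands in for a real struct rpc_program definition. */
	extern struct rpc_program example_acl_program;

	static struct rpc_clnt *example_bind_acl(struct rpc_clnt *nfs_client)
	{
		/* Clones nfs_client, binds version 3 of the side protocol and pings it. */
		return rpc_bind_new_program(nfs_client, &example_acl_program, 3);
	}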
/*
* Default callback for async RPC calls
*/
@@ -305,38 +378,41 @@ rpc_default_callback(struct rpc_task *task)
}
/*
- * Export the signal mask handling for aysnchronous code that
+ * Export the signal mask handling for synchronous code that
* sleeps on RPC calls
*/
+#define RPC_INTR_SIGNALS (sigmask(SIGINT) | sigmask(SIGQUIT) | sigmask(SIGKILL))
+static void rpc_save_sigmask(sigset_t *oldset, int intr)
+{
+ unsigned long sigallow = 0;
+ sigset_t sigmask;
+
+ /* Block all signals except those listed in sigallow */
+ if (intr)
+ sigallow |= RPC_INTR_SIGNALS;
+ siginitsetinv(&sigmask, sigallow);
+ sigprocmask(SIG_BLOCK, &sigmask, oldset);
+}
+
+static inline void rpc_task_sigmask(struct rpc_task *task, sigset_t *oldset)
+{
+ rpc_save_sigmask(oldset, !RPC_TASK_UNINTERRUPTIBLE(task));
+}
+
+static inline void rpc_restore_sigmask(sigset_t *oldset)
+{
+ sigprocmask(SIG_SETMASK, oldset, NULL);
+}
+
void rpc_clnt_sigmask(struct rpc_clnt *clnt, sigset_t *oldset)
{
- unsigned long sigallow = sigmask(SIGKILL);
- unsigned long irqflags;
-
- /* Turn off various signals */
- if (clnt->cl_intr) {
- struct k_sigaction *action = current->sighand->action;
- if (action[SIGINT-1].sa.sa_handler == SIG_DFL)
- sigallow |= sigmask(SIGINT);
- if (action[SIGQUIT-1].sa.sa_handler == SIG_DFL)
- sigallow |= sigmask(SIGQUIT);
- }
- spin_lock_irqsave(&current->sighand->siglock, irqflags);
- *oldset = current->blocked;
- siginitsetinv(&current->blocked, sigallow & ~oldset->sig[0]);
- recalc_sigpending();
- spin_unlock_irqrestore(&current->sighand->siglock, irqflags);
+ rpc_save_sigmask(oldset, clnt->cl_intr);
}
void rpc_clnt_sigunmask(struct rpc_clnt *clnt, sigset_t *oldset)
{
- unsigned long irqflags;
-
- spin_lock_irqsave(&current->sighand->siglock, irqflags);
- current->blocked = *oldset;
- recalc_sigpending();
- spin_unlock_irqrestore(&current->sighand->siglock, irqflags);
+ rpc_restore_sigmask(oldset);
}
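The exported rpc_clnt_sigmask()/rpc_clnt_sigunmask() pair keeps the old semantics for callers that sleep on RPC results themselves; a hypothetical sketch of that pattern (the wait condition is illustrative):

	#include <linux/wait.h>
	#include <linux/sunrpc/clnt.h>

	/* Hypothetical: block everything except SIGINT/SIGQUIT/SIGKILL (honouring
	 * cl_intr) around a caller-managed wait, as rpc_call_sync() now does internally. */
	static int example_wait_for_reply(struct rpc_clnt *clnt,
					  wait_queue_head_t *waitq, int *done)
	{
		sigset_t oldset;
		int ret;

		rpc_clnt_sigmask(clnt, &oldset);
		ret = wait_event_interruptible(*waitq, *done != 0);
		rpc_clnt_sigunmask(clnt, &oldset);
		return ret;
	}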
/*
@@ -354,26 +430,26 @@ int rpc_call_sync(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
BUG_ON(flags & RPC_TASK_ASYNC);
- rpc_clnt_sigmask(clnt, &oldset);
-
status = -ENOMEM;
task = rpc_new_task(clnt, NULL, flags);
if (task == NULL)
goto out;
+ /* Mask signals on RPC calls _and_ GSS_AUTH upcalls */
+ rpc_task_sigmask(task, &oldset);
+
rpc_call_setup(task, msg, 0);
/* Set up the call info struct and execute the task */
- if (task->tk_status == 0)
+ if (task->tk_status == 0) {
status = rpc_execute(task);
- else {
+ } else {
status = task->tk_status;
rpc_release_task(task);
}
+ rpc_restore_sigmask(&oldset);
out:
- rpc_clnt_sigunmask(clnt, &oldset);
-
return status;
}
@@ -394,8 +470,6 @@ rpc_call_async(struct rpc_clnt *clnt, struct rpc_message *msg, int flags,
flags |= RPC_TASK_ASYNC;
- rpc_clnt_sigmask(clnt, &oldset);
-
/* Create/initialize a new RPC task */
if (!callback)
callback = rpc_default_callback;
@@ -404,6 +478,9 @@ rpc_call_async(struct rpc_clnt *clnt, struct rpc_message *msg, int flags,
goto out;
task->tk_calldata = data;
+ /* Mask signals on GSS_AUTH upcalls */
+ rpc_task_sigmask(task, &oldset);
+
rpc_call_setup(task, msg, 0);
/* Set up the call info struct and execute the task */
@@ -413,9 +490,8 @@ rpc_call_async(struct rpc_clnt *clnt, struct rpc_message *msg, int flags,
else
rpc_release_task(task);
+ rpc_restore_sigmask(&oldset);
out:
- rpc_clnt_sigunmask(clnt, &oldset);
-
return status;
}
@@ -593,7 +669,7 @@ call_allocate(struct rpc_task *task)
return;
printk(KERN_INFO "RPC: buffer allocation failed for task %p\n", task);
- if (RPC_IS_ASYNC(task) || !(task->tk_client->cl_intr && signalled())) {
+ if (RPC_IS_ASYNC(task) || !signalled()) {
xprt_release(task);
task->tk_action = call_reserve;
rpc_delay(task, HZ>>4);
@@ -957,7 +1033,9 @@ call_header(struct rpc_task *task)
*p++ = htonl(clnt->cl_prog); /* program number */
*p++ = htonl(clnt->cl_vers); /* program version */
*p++ = htonl(task->tk_msg.rpc_proc->p_proc); /* procedure */
- return rpcauth_marshcred(task, p);
+ p = rpcauth_marshcred(task, p);
+ req->rq_slen = xdr_adjust_iovec(&req->rq_svec[0], p);
+ return p;
}
/*
@@ -986,10 +1064,11 @@ call_verify(struct rpc_task *task)
case RPC_AUTH_ERROR:
break;
case RPC_MISMATCH:
- printk(KERN_WARNING "%s: RPC call version mismatch!\n", __FUNCTION__);
- goto out_eio;
+ dprintk("%s: RPC call version mismatch!\n", __FUNCTION__);
+ error = -EPROTONOSUPPORT;
+ goto out_err;
default:
- printk(KERN_WARNING "%s: RPC call rejected, unknown error: %x\n", __FUNCTION__, n);
+ dprintk("%s: RPC call rejected, unknown error: %x\n", __FUNCTION__, n);
goto out_eio;
}
if (--len < 0)
@@ -1040,23 +1119,26 @@ call_verify(struct rpc_task *task)
case RPC_SUCCESS:
return p;
case RPC_PROG_UNAVAIL:
- printk(KERN_WARNING "RPC: call_verify: program %u is unsupported by server %s\n",
+ dprintk("RPC: call_verify: program %u is unsupported by server %s\n",
(unsigned int)task->tk_client->cl_prog,
task->tk_client->cl_server);
- goto out_eio;
+ error = -EPFNOSUPPORT;
+ goto out_err;
case RPC_PROG_MISMATCH:
- printk(KERN_WARNING "RPC: call_verify: program %u, version %u unsupported by server %s\n",
+ dprintk("RPC: call_verify: program %u, version %u unsupported by server %s\n",
(unsigned int)task->tk_client->cl_prog,
(unsigned int)task->tk_client->cl_vers,
task->tk_client->cl_server);
- goto out_eio;
+ error = -EPROTONOSUPPORT;
+ goto out_err;
case RPC_PROC_UNAVAIL:
- printk(KERN_WARNING "RPC: call_verify: proc %p unsupported by program %u, version %u on server %s\n",
+ dprintk("RPC: call_verify: proc %p unsupported by program %u, version %u on server %s\n",
task->tk_msg.rpc_proc,
task->tk_client->cl_prog,
task->tk_client->cl_vers,
task->tk_client->cl_server);
- goto out_eio;
+ error = -EOPNOTSUPP;
+ goto out_err;
case RPC_GARBAGE_ARGS:
dprintk("RPC: %4d %s: server saw garbage\n", task->tk_pid, __FUNCTION__);
break; /* retry */
@@ -1069,7 +1151,7 @@ out_retry:
task->tk_client->cl_stats->rpcgarbage++;
if (task->tk_garb_retry) {
task->tk_garb_retry--;
- dprintk(KERN_WARNING "RPC %s: retrying %4d\n", __FUNCTION__, task->tk_pid);
+ dprintk("RPC %s: retrying %4d\n", __FUNCTION__, task->tk_pid);
task->tk_action = call_bind;
return NULL;
}
@@ -1083,3 +1165,30 @@ out_overflow:
printk(KERN_WARNING "RPC %s: server reply was truncated.\n", __FUNCTION__);
goto out_retry;
}
+
+static int rpcproc_encode_null(void *rqstp, u32 *data, void *obj)
+{
+ return 0;
+}
+
+static int rpcproc_decode_null(void *rqstp, u32 *data, void *obj)
+{
+ return 0;
+}
+
+static struct rpc_procinfo rpcproc_null = {
+ .p_encode = rpcproc_encode_null,
+ .p_decode = rpcproc_decode_null,
+};
+
+int rpc_ping(struct rpc_clnt *clnt, int flags)
+{
+ struct rpc_message msg = {
+ .rpc_proc = &rpcproc_null,
+ };
+ int err;
+ msg.rpc_cred = authnull_ops.lookup_cred(NULL, NULL, 0);
+ err = rpc_call_sync(clnt, &msg, flags);
+ put_rpccred(msg.rpc_cred);
+ return err;
+}
diff --git a/net/sunrpc/pmap_clnt.c b/net/sunrpc/pmap_clnt.c
index d0b1d2c34a4..4e81f276692 100644
--- a/net/sunrpc/pmap_clnt.c
+++ b/net/sunrpc/pmap_clnt.c
@@ -53,6 +53,9 @@ rpc_getport(struct rpc_task *task, struct rpc_clnt *clnt)
task->tk_pid, clnt->cl_server,
map->pm_prog, map->pm_vers, map->pm_prot);
+ /* Autobind on cloned rpc clients is discouraged */
+ BUG_ON(clnt->cl_parent != clnt);
+
spin_lock(&pmap_lock);
if (map->pm_binding) {
rpc_sleep_on(&map->pm_bindwait, task, NULL, NULL);
@@ -207,12 +210,10 @@ pmap_create(char *hostname, struct sockaddr_in *srvaddr, int proto)
xprt->addr.sin_port = htons(RPC_PMAP_PORT);
/* printk("pmap: create clnt\n"); */
- clnt = rpc_create_client(xprt, hostname,
+ clnt = rpc_new_client(xprt, hostname,
&pmap_program, RPC_PMAP_VERSION,
RPC_AUTH_UNIX);
- if (IS_ERR(clnt)) {
- xprt_destroy(xprt);
- } else {
+ if (!IS_ERR(clnt)) {
clnt->cl_softrtry = 1;
clnt->cl_chatty = 1;
clnt->cl_oneshot = 1;
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index c06614d0e31..2d9eb7fbd52 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -290,7 +290,7 @@ static void rpc_make_runnable(struct rpc_task *task)
return;
}
} else
- wake_up(&task->u.tk_wait.waitq);
+ wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED);
}
/*
@@ -555,6 +555,38 @@ __rpc_atrun(struct rpc_task *task)
}
/*
+ * Helper that calls task->tk_exit if it exists and then returns
+ * true if we should exit __rpc_execute.
+ */
+static inline int __rpc_do_exit(struct rpc_task *task)
+{
+ if (task->tk_exit != NULL) {
+ lock_kernel();
+ task->tk_exit(task);
+ unlock_kernel();
+ /* If tk_action is non-null, we should restart the call */
+ if (task->tk_action != NULL) {
+ if (!RPC_ASSASSINATED(task)) {
+ /* Release RPC slot and buffer memory */
+ xprt_release(task);
+ rpc_free(task);
+ return 0;
+ }
+ printk(KERN_ERR "RPC: dead task tried to walk away.\n");
+ }
+ }
+ return 1;
+}
+
+static int rpc_wait_bit_interruptible(void *word)
+{
+ if (signal_pending(current))
+ return -ERESTARTSYS;
+ schedule();
+ return 0;
+}
+
+/*
* This is the RPC `scheduler' (or rather, the finite state machine).
*/
static int __rpc_execute(struct rpc_task *task)
@@ -566,8 +598,7 @@ static int __rpc_execute(struct rpc_task *task)
BUG_ON(RPC_IS_QUEUED(task));
- restarted:
- while (1) {
+ for (;;) {
/*
* Garbage collection of pending timers...
*/
@@ -600,11 +631,12 @@ static int __rpc_execute(struct rpc_task *task)
* by someone else.
*/
if (!RPC_IS_QUEUED(task)) {
- if (!task->tk_action)
+ if (task->tk_action != NULL) {
+ lock_kernel();
+ task->tk_action(task);
+ unlock_kernel();
+ } else if (__rpc_do_exit(task))
break;
- lock_kernel();
- task->tk_action(task);
- unlock_kernel();
}
/*
@@ -624,44 +656,26 @@ static int __rpc_execute(struct rpc_task *task)
/* sync task: sleep here */
dprintk("RPC: %4d sync task going to sleep\n", task->tk_pid);
- if (RPC_TASK_UNINTERRUPTIBLE(task)) {
- __wait_event(task->u.tk_wait.waitq, !RPC_IS_QUEUED(task));
- } else {
- __wait_event_interruptible(task->u.tk_wait.waitq, !RPC_IS_QUEUED(task), status);
+ /* Note: Caller should be using rpc_clnt_sigmask() */
+ status = out_of_line_wait_on_bit(&task->tk_runstate,
+ RPC_TASK_QUEUED, rpc_wait_bit_interruptible,
+ TASK_INTERRUPTIBLE);
+ if (status == -ERESTARTSYS) {
/*
* When a sync task receives a signal, it exits with
* -ERESTARTSYS. In order to catch any callbacks that
* clean up after sleeping on some queue, we don't
* break the loop here, but go around once more.
*/
- if (status == -ERESTARTSYS) {
- dprintk("RPC: %4d got signal\n", task->tk_pid);
- task->tk_flags |= RPC_TASK_KILLED;
- rpc_exit(task, -ERESTARTSYS);
- rpc_wake_up_task(task);
- }
+ dprintk("RPC: %4d got signal\n", task->tk_pid);
+ task->tk_flags |= RPC_TASK_KILLED;
+ rpc_exit(task, -ERESTARTSYS);
+ rpc_wake_up_task(task);
}
rpc_set_running(task);
dprintk("RPC: %4d sync task resuming\n", task->tk_pid);
}
- if (task->tk_exit) {
- lock_kernel();
- task->tk_exit(task);
- unlock_kernel();
- /* If tk_action is non-null, the user wants us to restart */
- if (task->tk_action) {
- if (!RPC_ASSASSINATED(task)) {
- /* Release RPC slot and buffer memory */
- if (task->tk_rqstp)
- xprt_release(task);
- rpc_free(task);
- goto restarted;
- }
- printk(KERN_ERR "RPC: dead task tries to walk away.\n");
- }
- }
-
dprintk("RPC: %4d exit() = %d\n", task->tk_pid, task->tk_status);
status = task->tk_status;
@@ -759,8 +773,6 @@ void rpc_init_task(struct rpc_task *task, struct rpc_clnt *clnt, rpc_action call
/* Initialize workqueue for async tasks */
task->tk_workqueue = rpciod_workqueue;
- if (!RPC_IS_ASYNC(task))
- init_waitqueue_head(&task->u.tk_wait.waitq);
if (clnt) {
atomic_inc(&clnt->cl_users);
diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c
index d4f26bf9e73..32e8acbc60f 100644
--- a/net/sunrpc/sunrpc_syms.c
+++ b/net/sunrpc/sunrpc_syms.c
@@ -42,6 +42,7 @@ EXPORT_SYMBOL(rpc_release_task);
/* RPC client functions */
EXPORT_SYMBOL(rpc_create_client);
EXPORT_SYMBOL(rpc_clone_client);
+EXPORT_SYMBOL(rpc_bind_new_program);
EXPORT_SYMBOL(rpc_destroy_client);
EXPORT_SYMBOL(rpc_shutdown_client);
EXPORT_SYMBOL(rpc_release_client);
@@ -61,7 +62,6 @@ EXPORT_SYMBOL(rpc_mkpipe);
/* Client transport */
EXPORT_SYMBOL(xprt_create_proto);
-EXPORT_SYMBOL(xprt_destroy);
EXPORT_SYMBOL(xprt_set_timeout);
EXPORT_SYMBOL(xprt_udp_slot_table_entries);
EXPORT_SYMBOL(xprt_tcp_slot_table_entries);
@@ -129,6 +129,10 @@ EXPORT_SYMBOL(xdr_encode_netobj);
EXPORT_SYMBOL(xdr_encode_pages);
EXPORT_SYMBOL(xdr_inline_pages);
EXPORT_SYMBOL(xdr_shift_buf);
+EXPORT_SYMBOL(xdr_encode_word);
+EXPORT_SYMBOL(xdr_decode_word);
+EXPORT_SYMBOL(xdr_encode_array2);
+EXPORT_SYMBOL(xdr_decode_array2);
EXPORT_SYMBOL(xdr_buf_from_iov);
EXPORT_SYMBOL(xdr_buf_subsegment);
EXPORT_SYMBOL(xdr_buf_read_netobj);
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index bb2d99f3331..e9bd91265f7 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -35,20 +35,24 @@ svc_create(struct svc_program *prog, unsigned int bufsize)
if (!(serv = (struct svc_serv *) kmalloc(sizeof(*serv), GFP_KERNEL)))
return NULL;
memset(serv, 0, sizeof(*serv));
+ serv->sv_name = prog->pg_name;
serv->sv_program = prog;
serv->sv_nrthreads = 1;
serv->sv_stats = prog->pg_stats;
serv->sv_bufsz = bufsize? bufsize : 4096;
- prog->pg_lovers = prog->pg_nvers-1;
xdrsize = 0;
- for (vers=0; vers<prog->pg_nvers ; vers++)
- if (prog->pg_vers[vers]) {
- prog->pg_hivers = vers;
- if (prog->pg_lovers > vers)
- prog->pg_lovers = vers;
- if (prog->pg_vers[vers]->vs_xdrsize > xdrsize)
- xdrsize = prog->pg_vers[vers]->vs_xdrsize;
- }
+ while (prog) {
+ prog->pg_lovers = prog->pg_nvers-1;
+ for (vers=0; vers<prog->pg_nvers ; vers++)
+ if (prog->pg_vers[vers]) {
+ prog->pg_hivers = vers;
+ if (prog->pg_lovers > vers)
+ prog->pg_lovers = vers;
+ if (prog->pg_vers[vers]->vs_xdrsize > xdrsize)
+ xdrsize = prog->pg_vers[vers]->vs_xdrsize;
+ }
+ prog = prog->pg_next;
+ }
serv->sv_xdrsize = xdrsize;
INIT_LIST_HEAD(&serv->sv_threads);
INIT_LIST_HEAD(&serv->sv_sockets);
@@ -56,8 +60,6 @@ svc_create(struct svc_program *prog, unsigned int bufsize)
INIT_LIST_HEAD(&serv->sv_permsocks);
spin_lock_init(&serv->sv_lock);
- serv->sv_name = prog->pg_name;
-
/* Remove any stale portmap registrations */
svc_register(serv, 0, 0);
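svc_create() and svc_process() now walk the pg_next chain, so one server instance can serve several RPC programs; a hypothetical sketch of such a chain (names and version tables are illustrative; the program numbers are the NFS/NFS_ACL pair from the RPC_PARANOIA check removed later in this patch):

	#include <linux/sunrpc/svc.h>

	/* Hypothetical version tables; entries elided, a real definition fills these in. */
	static struct svc_version *example_versions[4];
	static struct svc_version *example_acl_versions[4];

	static struct svc_program example_acl_program = {
		.pg_prog	= 100227,	/* NFS_ACL */
		.pg_nvers	= 4,
		.pg_vers	= example_acl_versions,
		.pg_name	= "example_acl",
	};

	static struct svc_program example_program = {
		.pg_next	= &example_acl_program,	/* chain walked by svc_create()/svc_process() */
		.pg_prog	= 100003,	/* NFS */
		.pg_nvers	= 4,
		.pg_vers	= example_versions,
		.pg_name	= "example",
	};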
@@ -281,6 +283,7 @@ svc_process(struct svc_serv *serv, struct svc_rqst *rqstp)
rqstp->rq_res.len = 0;
rqstp->rq_res.page_base = 0;
rqstp->rq_res.page_len = 0;
+ rqstp->rq_res.buflen = PAGE_SIZE;
rqstp->rq_res.tail[0].iov_len = 0;
/* tcp needs a space for the record length... */
if (rqstp->rq_prot == IPPROTO_TCP)
@@ -338,7 +341,10 @@ svc_process(struct svc_serv *serv, struct svc_rqst *rqstp)
goto sendit;
}
- if (prog != progp->pg_prog)
+ for (progp = serv->sv_program; progp; progp = progp->pg_next)
+ if (prog == progp->pg_prog)
+ break;
+ if (progp == NULL)
goto err_bad_prog;
if (vers >= progp->pg_nvers ||
@@ -451,11 +457,7 @@ err_bad_auth:
goto sendit;
err_bad_prog:
-#ifdef RPC_PARANOIA
- if (prog != 100227 || progp->pg_prog != 100003)
- printk("svc: unknown program %d (me %d)\n", prog, progp->pg_prog);
- /* else it is just a Solaris client seeing if ACLs are supported */
-#endif
+ dprintk("svc: unknown program %d\n", prog);
serv->sv_stats->rpcbadfmt++;
svc_putu32(resv, rpc_prog_unavail);
goto sendit;
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 67b9f035ba8..8a4d9c106af 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -176,21 +176,23 @@ xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
xdr->buflen += len;
}
-void
+ssize_t
xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base,
skb_reader_t *desc,
skb_read_actor_t copy_actor)
{
struct page **ppage = xdr->pages;
unsigned int len, pglen = xdr->page_len;
+ ssize_t copied = 0;
int ret;
len = xdr->head[0].iov_len;
if (base < len) {
len -= base;
ret = copy_actor(desc, (char *)xdr->head[0].iov_base + base, len);
+ copied += ret;
if (ret != len || !desc->count)
- return;
+ goto out;
base = 0;
} else
base -= len;
@@ -210,6 +212,17 @@ xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base,
do {
char *kaddr;
+ /* ACL likes to be lazy in allocating pages - ACLs
+ * are small by default but can get huge. */
+ if (unlikely(*ppage == NULL)) {
+ *ppage = alloc_page(GFP_ATOMIC);
+ if (unlikely(*ppage == NULL)) {
+ if (copied == 0)
+ copied = -ENOMEM;
+ goto out;
+ }
+ }
+
len = PAGE_CACHE_SIZE;
kaddr = kmap_atomic(*ppage, KM_SKB_SUNRPC_DATA);
if (base) {
@@ -225,14 +238,17 @@ xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base,
}
flush_dcache_page(*ppage);
kunmap_atomic(kaddr, KM_SKB_SUNRPC_DATA);
+ copied += ret;
if (ret != len || !desc->count)
- return;
+ goto out;
ppage++;
} while ((pglen -= len) != 0);
copy_tail:
len = xdr->tail[0].iov_len;
if (base < len)
- copy_actor(desc, (char *)xdr->tail[0].iov_base + base, len - base);
+ copied += copy_actor(desc, (char *)xdr->tail[0].iov_base + base, len - base);
+out:
+ return copied;
}
@@ -616,12 +632,24 @@ xdr_shift_buf(struct xdr_buf *buf, size_t len)
void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, uint32_t *p)
{
struct kvec *iov = buf->head;
+ int scratch_len = buf->buflen - buf->page_len - buf->tail[0].iov_len;
+ BUG_ON(scratch_len < 0);
xdr->buf = buf;
xdr->iov = iov;
- xdr->end = (uint32_t *)((char *)iov->iov_base + iov->iov_len);
- buf->len = iov->iov_len = (char *)p - (char *)iov->iov_base;
- xdr->p = p;
+ xdr->p = (uint32_t *)((char *)iov->iov_base + iov->iov_len);
+ xdr->end = (uint32_t *)((char *)iov->iov_base + scratch_len);
+ BUG_ON(iov->iov_len > scratch_len);
+
+ if (p != xdr->p && p != NULL) {
+ size_t len;
+
+ BUG_ON(p < xdr->p || p > xdr->end);
+ len = (char *)p - (char *)xdr->p;
+ xdr->p = p;
+ buf->len += len;
+ iov->iov_len += len;
+ }
}
EXPORT_SYMBOL(xdr_init_encode);
@@ -859,8 +887,34 @@ out:
return status;
}
-static int
-read_u32_from_xdr_buf(struct xdr_buf *buf, int base, u32 *obj)
+/* obj is assumed to point to allocated memory of size at least len: */
+int
+write_bytes_to_xdr_buf(struct xdr_buf *buf, int base, void *obj, int len)
+{
+ struct xdr_buf subbuf;
+ int this_len;
+ int status;
+
+ status = xdr_buf_subsegment(buf, &subbuf, base, len);
+ if (status)
+ goto out;
+ this_len = min(len, (int)subbuf.head[0].iov_len);
+ memcpy(subbuf.head[0].iov_base, obj, this_len);
+ len -= this_len;
+ obj += this_len;
+ this_len = min(len, (int)subbuf.page_len);
+ if (this_len)
+ _copy_to_pages(subbuf.pages, subbuf.page_base, obj, this_len);
+ len -= this_len;
+ obj += this_len;
+ this_len = min(len, (int)subbuf.tail[0].iov_len);
+ memcpy(subbuf.tail[0].iov_base, obj, this_len);
+out:
+ return status;
+}
+
+int
+xdr_decode_word(struct xdr_buf *buf, int base, u32 *obj)
{
u32 raw;
int status;
@@ -872,6 +926,14 @@ read_u32_from_xdr_buf(struct xdr_buf *buf, int base, u32 *obj)
return 0;
}
+int
+xdr_encode_word(struct xdr_buf *buf, int base, u32 obj)
+{
+ u32 raw = htonl(obj);
+
+ return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj));
+}
+
/* If the netobj starting offset bytes from the start of xdr_buf is contained
* entirely in the head or the tail, set object to point to it; otherwise
* try to find space for it at the end of the tail, copy it there, and
@@ -882,7 +944,7 @@ xdr_buf_read_netobj(struct xdr_buf *buf, struct xdr_netobj *obj, int offset)
u32 tail_offset = buf->head[0].iov_len + buf->page_len;
u32 obj_end_offset;
- if (read_u32_from_xdr_buf(buf, offset, &obj->len))
+ if (xdr_decode_word(buf, offset, &obj->len))
goto out;
obj_end_offset = offset + 4 + obj->len;
@@ -915,3 +977,219 @@ xdr_buf_read_netobj(struct xdr_buf *buf, struct xdr_netobj *obj, int offset)
out:
return -1;
}
+
+/* Returns 0 on success, or else a negative error code. */
+static int
+xdr_xcode_array2(struct xdr_buf *buf, unsigned int base,
+ struct xdr_array2_desc *desc, int encode)
+{
+ char *elem = NULL, *c;
+ unsigned int copied = 0, todo, avail_here;
+ struct page **ppages = NULL;
+ int err;
+
+ if (encode) {
+ if (xdr_encode_word(buf, base, desc->array_len) != 0)
+ return -EINVAL;
+ } else {
+ if (xdr_decode_word(buf, base, &desc->array_len) != 0 ||
+ (unsigned long) base + 4 + desc->array_len *
+ desc->elem_size > buf->len)
+ return -EINVAL;
+ }
+ base += 4;
+
+ if (!desc->xcode)
+ return 0;
+
+ todo = desc->array_len * desc->elem_size;
+
+ /* process head */
+ if (todo && base < buf->head->iov_len) {
+ c = buf->head->iov_base + base;
+ avail_here = min_t(unsigned int, todo,
+ buf->head->iov_len - base);
+ todo -= avail_here;
+
+ while (avail_here >= desc->elem_size) {
+ err = desc->xcode(desc, c);
+ if (err)
+ goto out;
+ c += desc->elem_size;
+ avail_here -= desc->elem_size;
+ }
+ if (avail_here) {
+ if (!elem) {
+ elem = kmalloc(desc->elem_size, GFP_KERNEL);
+ err = -ENOMEM;
+ if (!elem)
+ goto out;
+ }
+ if (encode) {
+ err = desc->xcode(desc, elem);
+ if (err)
+ goto out;
+ memcpy(c, elem, avail_here);
+ } else
+ memcpy(elem, c, avail_here);
+ copied = avail_here;
+ }
+ base = buf->head->iov_len; /* align to start of pages */
+ }
+
+ /* process pages array */
+ base -= buf->head->iov_len;
+ if (todo && base < buf->page_len) {
+ unsigned int avail_page;
+
+ avail_here = min(todo, buf->page_len - base);
+ todo -= avail_here;
+
+ base += buf->page_base;
+ ppages = buf->pages + (base >> PAGE_CACHE_SHIFT);
+ base &= ~PAGE_CACHE_MASK;
+ avail_page = min_t(unsigned int, PAGE_CACHE_SIZE - base,
+ avail_here);
+ c = kmap(*ppages) + base;
+
+ while (avail_here) {
+ avail_here -= avail_page;
+ if (copied || avail_page < desc->elem_size) {
+ unsigned int l = min(avail_page,
+ desc->elem_size - copied);
+ if (!elem) {
+ elem = kmalloc(desc->elem_size,
+ GFP_KERNEL);
+ err = -ENOMEM;
+ if (!elem)
+ goto out;
+ }
+ if (encode) {
+ if (!copied) {
+ err = desc->xcode(desc, elem);
+ if (err)
+ goto out;
+ }
+ memcpy(c, elem + copied, l);
+ copied += l;
+ if (copied == desc->elem_size)
+ copied = 0;
+ } else {
+ memcpy(elem + copied, c, l);
+ copied += l;
+ if (copied == desc->elem_size) {
+ err = desc->xcode(desc, elem);
+ if (err)
+ goto out;
+ copied = 0;
+ }
+ }
+ avail_page -= l;
+ c += l;
+ }
+ while (avail_page >= desc->elem_size) {
+ err = desc->xcode(desc, c);
+ if (err)
+ goto out;
+ c += desc->elem_size;
+ avail_page -= desc->elem_size;
+ }
+ if (avail_page) {
+ unsigned int l = min(avail_page,
+ desc->elem_size - copied);
+ if (!elem) {
+ elem = kmalloc(desc->elem_size,
+ GFP_KERNEL);
+ err = -ENOMEM;
+ if (!elem)
+ goto out;
+ }
+ if (encode) {
+ if (!copied) {
+ err = desc->xcode(desc, elem);
+ if (err)
+ goto out;
+ }
+ memcpy(c, elem + copied, l);
+ copied += l;
+ if (copied == desc->elem_size)
+ copied = 0;
+ } else {
+ memcpy(elem + copied, c, l);
+ copied += l;
+ if (copied == desc->elem_size) {
+ err = desc->xcode(desc, elem);
+ if (err)
+ goto out;
+ copied = 0;
+ }
+ }
+ }
+ if (avail_here) {
+ kunmap(*ppages);
+ ppages++;
+ c = kmap(*ppages);
+ }
+
+ avail_page = min(avail_here,
+ (unsigned int) PAGE_CACHE_SIZE);
+ }
+ base = buf->page_len; /* align to start of tail */
+ }
+
+ /* process tail */
+ base -= buf->page_len;
+ if (todo) {
+ c = buf->tail->iov_base + base;
+ if (copied) {
+ unsigned int l = desc->elem_size - copied;
+
+ if (encode)
+ memcpy(c, elem + copied, l);
+ else {
+ memcpy(elem + copied, c, l);
+ err = desc->xcode(desc, elem);
+ if (err)
+ goto out;
+ }
+ todo -= l;
+ c += l;
+ }
+ while (todo) {
+ err = desc->xcode(desc, c);
+ if (err)
+ goto out;
+ c += desc->elem_size;
+ todo -= desc->elem_size;
+ }
+ }
+ err = 0;
+
+out:
+ if (elem)
+ kfree(elem);
+ if (ppages)
+ kunmap(*ppages);
+ return err;
+}
+
+int
+xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
+ struct xdr_array2_desc *desc)
+{
+ if (base >= buf->len)
+ return -EINVAL;
+
+ return xdr_xcode_array2(buf, base, desc, 0);
+}
+
+int
+xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
+ struct xdr_array2_desc *desc)
+{
+ if ((unsigned long) base + 4 + desc->array_len * desc->elem_size >
+ buf->head->iov_len + buf->page_len + buf->tail->iov_len)
+ return -EINVAL;
+
+ return xdr_xcode_array2(buf, base, desc, 1);
+}
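xdr_encode_array2()/xdr_decode_array2() walk a flat array that may span head, pages and tail, handing each fixed-size element to the descriptor's xcode callback; a hypothetical decode descriptor (callback signature inferred from the calls above):

	#include <linux/sunrpc/xdr.h>

	/* Hypothetical element codec: each array element is one 32-bit XDR word. */
	static int example_xcode_u32(struct xdr_array2_desc *desc, void *elem)
	{
		u32 value = ntohl(*(u32 *)elem);	/* elements are passed in raw XDR form */

		return value == 0 ? -EINVAL : 0;	/* non-zero aborts xdr_xcode_array2() */
	}

	static int example_decode(struct xdr_buf *buf, unsigned int base)
	{
		struct xdr_array2_desc desc = {
			.elem_size	= sizeof(u32),
			.xcode		= example_xcode_u32,
			/* .array_len is read from the buffer on decode */
		};

		return xdr_decode_array2(buf, base, &desc);
	}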
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index c74a6bb9407..eca92405948 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -569,8 +569,11 @@ void xprt_connect(struct rpc_task *task)
if (xprt->sock != NULL)
schedule_delayed_work(&xprt->sock_connect,
RPC_REESTABLISH_TIMEOUT);
- else
+ else {
schedule_work(&xprt->sock_connect);
+ if (!RPC_IS_ASYNC(task))
+ flush_scheduled_work();
+ }
}
return;
out_write:
@@ -725,7 +728,8 @@ csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
goto no_checksum;
desc.csum = csum_partial(skb->data, desc.offset, skb->csum);
- xdr_partial_copy_from_skb(xdr, 0, &desc, skb_read_and_csum_bits);
+ if (xdr_partial_copy_from_skb(xdr, 0, &desc, skb_read_and_csum_bits) < 0)
+ return -1;
if (desc.offset != skb->len) {
unsigned int csum2;
csum2 = skb_checksum(skb, desc.offset, skb->len - desc.offset, 0);
@@ -737,7 +741,8 @@ csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
return -1;
return 0;
no_checksum:
- xdr_partial_copy_from_skb(xdr, 0, &desc, skb_read_bits);
+ if (xdr_partial_copy_from_skb(xdr, 0, &desc, skb_read_bits) < 0)
+ return -1;
if (desc.count)
return -1;
return 0;
@@ -821,10 +826,15 @@ tcp_copy_data(skb_reader_t *desc, void *p, size_t len)
{
if (len > desc->count)
len = desc->count;
- if (skb_copy_bits(desc->skb, desc->offset, p, len))
+ if (skb_copy_bits(desc->skb, desc->offset, p, len)) {
+ dprintk("RPC: failed to copy %zu bytes from skb. %zu bytes remain\n",
+ len, desc->count);
return 0;
+ }
desc->offset += len;
desc->count -= len;
+ dprintk("RPC: copied %zu bytes from skb. %zu bytes remain\n",
+ len, desc->count);
return len;
}
@@ -863,6 +873,8 @@ tcp_read_fraghdr(struct rpc_xprt *xprt, skb_reader_t *desc)
static void
tcp_check_recm(struct rpc_xprt *xprt)
{
+ dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u, tcp_flags = %lx\n",
+ xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen, xprt->tcp_flags);
if (xprt->tcp_offset == xprt->tcp_reclen) {
xprt->tcp_flags |= XPRT_COPY_RECM;
xprt->tcp_offset = 0;
@@ -907,6 +919,7 @@ tcp_read_request(struct rpc_xprt *xprt, skb_reader_t *desc)
struct rpc_rqst *req;
struct xdr_buf *rcvbuf;
size_t len;
+ ssize_t r;
/* Find and lock the request corresponding to this xid */
spin_lock(&xprt->sock_lock);
@@ -927,15 +940,40 @@ tcp_read_request(struct rpc_xprt *xprt, skb_reader_t *desc)
len = xprt->tcp_reclen - xprt->tcp_offset;
memcpy(&my_desc, desc, sizeof(my_desc));
my_desc.count = len;
- xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied,
+ r = xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied,
&my_desc, tcp_copy_data);
- desc->count -= len;
- desc->offset += len;
+ desc->count -= r;
+ desc->offset += r;
} else
- xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied,
+ r = xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied,
desc, tcp_copy_data);
- xprt->tcp_copied += len;
- xprt->tcp_offset += len;
+
+ if (r > 0) {
+ xprt->tcp_copied += r;
+ xprt->tcp_offset += r;
+ }
+ if (r != len) {
+ /* Error when copying to the receive buffer,
+ * usually because we weren't able to allocate
+ * additional buffer pages. All we can do now
+ * is turn off XPRT_COPY_DATA, so the request
+ * will not receive any additional updates,
+ * and time out.
+ * Any remaining data from this record will
+ * be discarded.
+ */
+ xprt->tcp_flags &= ~XPRT_COPY_DATA;
+ dprintk("RPC: XID %08x truncated request\n",
+ ntohl(xprt->tcp_xid));
+ dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u\n",
+ xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen);
+ goto out;
+ }
+
+ dprintk("RPC: XID %08x read %u bytes\n",
+ ntohl(xprt->tcp_xid), r);
+ dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u\n",
+ xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen);
if (xprt->tcp_copied == req->rq_private_buf.buflen)
xprt->tcp_flags &= ~XPRT_COPY_DATA;
@@ -944,6 +982,7 @@ tcp_read_request(struct rpc_xprt *xprt, skb_reader_t *desc)
xprt->tcp_flags &= ~XPRT_COPY_DATA;
}
+out:
if (!(xprt->tcp_flags & XPRT_COPY_DATA)) {
dprintk("RPC: %4d received reply complete\n",
req->rq_task->tk_pid);
@@ -967,6 +1006,7 @@ tcp_read_discard(struct rpc_xprt *xprt, skb_reader_t *desc)
desc->count -= len;
desc->offset += len;
xprt->tcp_offset += len;
+ dprintk("RPC: discarded %u bytes\n", len);
tcp_check_recm(xprt);
}
@@ -1064,8 +1104,7 @@ tcp_state_change(struct sock *sk)
case TCP_SYN_RECV:
break;
default:
- if (xprt_test_and_clear_connected(xprt))
- rpc_wake_up_status(&xprt->pending, -ENOTCONN);
+ xprt_disconnect(xprt);
break;
}
out:
@@ -1203,6 +1242,8 @@ xprt_transmit(struct rpc_task *task)
list_add_tail(&req->rq_list, &xprt->recv);
spin_unlock_bh(&xprt->sock_lock);
xprt_reset_majortimeo(req);
+ /* Turn off autodisconnect */
+ del_singleshot_timer_sync(&xprt->timer);
}
} else if (!req->rq_bytes_sent)
return;
@@ -1333,8 +1374,6 @@ xprt_reserve(struct rpc_task *task)
spin_lock(&xprt->xprt_lock);
do_xprt_reserve(task);
spin_unlock(&xprt->xprt_lock);
- if (task->tk_rqstp)
- del_timer_sync(&xprt->timer);
}
}
@@ -1649,6 +1688,10 @@ xprt_shutdown(struct rpc_xprt *xprt)
rpc_wake_up(&xprt->backlog);
wake_up(&xprt->cong_wait);
del_timer_sync(&xprt->timer);
+
+ /* synchronously wait for connect worker to finish */
+ cancel_delayed_work(&xprt->sock_connect);
+ flush_scheduled_work();
}
/*