Diffstat (limited to 'net/sunrpc')
-rw-r--r--   net/sunrpc/auth.c                       |  16
-rw-r--r--   net/sunrpc/auth_gss/auth_gss.c          |   2
-rw-r--r--   net/sunrpc/auth_gss/gss_krb5_seal.c     |  15
-rw-r--r--   net/sunrpc/auth_gss/gss_krb5_unseal.c   |   4
-rw-r--r--   net/sunrpc/auth_gss/gss_krb5_wrap.c     |  17
-rw-r--r--   net/sunrpc/auth_gss/gss_spkm3_mech.c    |   6
-rw-r--r--   net/sunrpc/auth_gss/gss_spkm3_seal.c    |   5
-rw-r--r--   net/sunrpc/auth_gss/gss_spkm3_unseal.c  |   4
-rw-r--r--   net/sunrpc/auth_gss/svcauth_gss.c       | 182
-rw-r--r--   net/sunrpc/cache.c                      | 163
-rw-r--r--   net/sunrpc/clnt.c                       |  53
-rw-r--r--   net/sunrpc/pmap_clnt.c                  |  41
-rw-r--r--   net/sunrpc/rpc_pipe.c                   |  33
-rw-r--r--   net/sunrpc/sched.c                      |  24
-rw-r--r--   net/sunrpc/stats.c                      | 119
-rw-r--r--   net/sunrpc/sunrpc_syms.c                |   6
-rw-r--r--   net/sunrpc/svcauth.c                    | 122
-rw-r--r--   net/sunrpc/svcauth_unix.c               | 229
-rw-r--r--   net/sunrpc/xprt.c                       |  29
-rw-r--r--   net/sunrpc/xprtsock.c                   |  49
20 files changed, 784 insertions(+), 335 deletions(-)
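
Before the raw diff: most of the churn in cache.c, svcauth_gss.c and svcauth_unix.c comes from retiring the DefineSimpleCacheLookup() macro in favour of explicit sunrpc_cache_lookup()/sunrpc_cache_update() helpers driven by per-cache alloc/init/update/match callbacks and a kref-counted cache_head. A minimal sketch of a cache adopting the new callbacks follows; the demo_* names are hypothetical and only mirror the rsi/rsc/ip_map conversions shown in the diff.

#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/sunrpc/cache.h>

#define DEMO_HASHBITS	6
#define DEMO_HASHMAX	(1 << DEMO_HASHBITS)

struct demo_ent {
	struct cache_head	h;	/* kref-counted by the generic cache code */
	int			key;	/* copied from the lookup key */
	int			value;	/* filled in by a later update */
};

static struct cache_head *demo_table[DEMO_HASHMAX];

static struct cache_head *demo_alloc(void)
{
	struct demo_ent *e = kmalloc(sizeof(*e), GFP_KERNEL);
	return e ? &e->h : NULL;
}

static void demo_put(struct kref *ref)
{
	kfree(container_of(ref, struct demo_ent, h.ref));
}

static int demo_match(struct cache_head *a, struct cache_head *b)
{
	return container_of(a, struct demo_ent, h)->key ==
	       container_of(b, struct demo_ent, h)->key;
}

static void demo_init(struct cache_head *cnew, struct cache_head *ckey)
{
	container_of(cnew, struct demo_ent, h)->key =
		container_of(ckey, struct demo_ent, h)->key;
}

static void demo_update(struct cache_head *cnew, struct cache_head *citem)
{
	container_of(cnew, struct demo_ent, h)->value =
		container_of(citem, struct demo_ent, h)->value;
}

static struct cache_detail demo_cache = {
	.hash_size	= DEMO_HASHMAX,
	.hash_table	= demo_table,
	.name		= "demo",
	.cache_put	= demo_put,
	.match		= demo_match,
	.init		= demo_init,
	.update		= demo_update,
	.alloc		= demo_alloc,
};

/* Lookup wrapper in the style of rsi_lookup()/rsc_lookup() in the diff. */
static struct demo_ent *demo_lookup(struct demo_ent *key)
{
	struct cache_head *ch;
	int hash = hash_long(key->key, DEMO_HASHBITS);

	ch = sunrpc_cache_lookup(&demo_cache, &key->h, hash);
	return ch ? container_of(ch, struct demo_ent, h) : NULL;
}
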
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c index 8d6f1a176b1..55163af3dca 100644 --- a/net/sunrpc/auth.c +++ b/net/sunrpc/auth.c @@ -64,14 +64,26 @@ rpcauth_create(rpc_authflavor_t pseudoflavor, struct rpc_clnt *clnt) struct rpc_authops *ops; u32 flavor = pseudoflavor_to_flavor(pseudoflavor); - if (flavor >= RPC_AUTH_MAXFLAVOR || !(ops = auth_flavors[flavor])) - return ERR_PTR(-EINVAL); + auth = ERR_PTR(-EINVAL); + if (flavor >= RPC_AUTH_MAXFLAVOR) + goto out; + + /* FIXME - auth_flavors[] really needs an rw lock, + * and module refcounting. */ +#ifdef CONFIG_KMOD + if ((ops = auth_flavors[flavor]) == NULL) + request_module("rpc-auth-%u", flavor); +#endif + if ((ops = auth_flavors[flavor]) == NULL) + goto out; auth = ops->create(clnt, pseudoflavor); if (IS_ERR(auth)) return auth; if (clnt->cl_auth) rpcauth_destroy(clnt->cl_auth); clnt->cl_auth = auth; + +out: return auth; } diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index bb46efd92e5..900ef31f5a0 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c @@ -721,6 +721,8 @@ gss_destroy(struct rpc_auth *auth) gss_auth = container_of(auth, struct gss_auth, rpc_auth); rpc_unlink(gss_auth->path); + dput(gss_auth->dentry); + gss_auth->dentry = NULL; gss_mech_put(gss_auth->mech); rpcauth_free_credcache(auth); diff --git a/net/sunrpc/auth_gss/gss_krb5_seal.c b/net/sunrpc/auth_gss/gss_krb5_seal.c index d0dfdfd5e79..f43311221a7 100644 --- a/net/sunrpc/auth_gss/gss_krb5_seal.c +++ b/net/sunrpc/auth_gss/gss_krb5_seal.c @@ -70,15 +70,19 @@ # define RPCDBG_FACILITY RPCDBG_AUTH #endif +spinlock_t krb5_seq_lock = SPIN_LOCK_UNLOCKED; + u32 gss_get_mic_kerberos(struct gss_ctx *gss_ctx, struct xdr_buf *text, struct xdr_netobj *token) { struct krb5_ctx *ctx = gss_ctx->internal_ctx_id; s32 checksum_type; - struct xdr_netobj md5cksum = {.len = 0, .data = NULL}; + char cksumdata[16]; + struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata}; unsigned char *ptr, *krb5_hdr, *msg_start; s32 now; + u32 seq_send; dprintk("RPC: gss_krb5_seal\n"); @@ -133,16 +137,15 @@ gss_get_mic_kerberos(struct gss_ctx *gss_ctx, struct xdr_buf *text, BUG(); } - kfree(md5cksum.data); + spin_lock(&krb5_seq_lock); + seq_send = ctx->seq_send++; + spin_unlock(&krb5_seq_lock); if ((krb5_make_seq_num(ctx->seq, ctx->initiate ? 0 : 0xff, - ctx->seq_send, krb5_hdr + 16, krb5_hdr + 8))) + seq_send, krb5_hdr + 16, krb5_hdr + 8))) goto out_err; - ctx->seq_send++; - return ((ctx->endtime < now) ? 
GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE); out_err: - kfree(md5cksum.data); return GSS_S_FAILURE; } diff --git a/net/sunrpc/auth_gss/gss_krb5_unseal.c b/net/sunrpc/auth_gss/gss_krb5_unseal.c index db055fd7d77..0828cf64100 100644 --- a/net/sunrpc/auth_gss/gss_krb5_unseal.c +++ b/net/sunrpc/auth_gss/gss_krb5_unseal.c @@ -79,7 +79,8 @@ gss_verify_mic_kerberos(struct gss_ctx *gss_ctx, int signalg; int sealalg; s32 checksum_type; - struct xdr_netobj md5cksum = {.len = 0, .data = NULL}; + char cksumdata[16]; + struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata}; s32 now; int direction; s32 seqnum; @@ -176,6 +177,5 @@ gss_verify_mic_kerberos(struct gss_ctx *gss_ctx, ret = GSS_S_COMPLETE; out: - kfree(md5cksum.data); return ret; } diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c index af777cf9f25..89d1f3e1412 100644 --- a/net/sunrpc/auth_gss/gss_krb5_wrap.c +++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c @@ -121,12 +121,14 @@ gss_wrap_kerberos(struct gss_ctx *ctx, int offset, { struct krb5_ctx *kctx = ctx->internal_ctx_id; s32 checksum_type; - struct xdr_netobj md5cksum = {.len = 0, .data = NULL}; + char cksumdata[16]; + struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata}; int blocksize = 0, plainlen; unsigned char *ptr, *krb5_hdr, *msg_start; s32 now; int headlen; struct page **tmp_pages; + u32 seq_send; dprintk("RPC: gss_wrap_kerberos\n"); @@ -205,23 +207,22 @@ gss_wrap_kerberos(struct gss_ctx *ctx, int offset, BUG(); } - kfree(md5cksum.data); + spin_lock(&krb5_seq_lock); + seq_send = kctx->seq_send++; + spin_unlock(&krb5_seq_lock); /* XXX would probably be more efficient to compute checksum * and encrypt at the same time: */ if ((krb5_make_seq_num(kctx->seq, kctx->initiate ? 0 : 0xff, - kctx->seq_send, krb5_hdr + 16, krb5_hdr + 8))) + seq_send, krb5_hdr + 16, krb5_hdr + 8))) goto out_err; if (gss_encrypt_xdr_buf(kctx->enc, buf, offset + headlen - blocksize, pages)) goto out_err; - kctx->seq_send++; - return ((kctx->endtime < now) ? 
GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE); out_err: - if (md5cksum.data) kfree(md5cksum.data); return GSS_S_FAILURE; } @@ -232,7 +233,8 @@ gss_unwrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf) int signalg; int sealalg; s32 checksum_type; - struct xdr_netobj md5cksum = {.len = 0, .data = NULL}; + char cksumdata[16]; + struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata}; s32 now; int direction; s32 seqnum; @@ -358,6 +360,5 @@ gss_unwrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf) ret = GSS_S_COMPLETE; out: - if (md5cksum.data) kfree(md5cksum.data); return ret; } diff --git a/net/sunrpc/auth_gss/gss_spkm3_mech.c b/net/sunrpc/auth_gss/gss_spkm3_mech.c index 58400807d4d..5bf11ccba7c 100644 --- a/net/sunrpc/auth_gss/gss_spkm3_mech.c +++ b/net/sunrpc/auth_gss/gss_spkm3_mech.c @@ -102,6 +102,12 @@ get_key(const void *p, const void *end, struct crypto_tfm **res, int *resalg) alg_mode = CRYPTO_TFM_MODE_CBC; setkey = 1; break; + case NID_cast5_cbc: + /* XXXX here in name only, not used */ + alg_name = "cast5"; + alg_mode = CRYPTO_TFM_MODE_CBC; + setkey = 0; /* XXX will need to set to 1 */ + break; case NID_md5: if (key.len == 0) { dprintk("RPC: SPKM3 get_key: NID_md5 zero Key length\n"); diff --git a/net/sunrpc/auth_gss/gss_spkm3_seal.c b/net/sunrpc/auth_gss/gss_spkm3_seal.c index 86fbf7c3e39..18c7862bc23 100644 --- a/net/sunrpc/auth_gss/gss_spkm3_seal.c +++ b/net/sunrpc/auth_gss/gss_spkm3_seal.c @@ -57,7 +57,8 @@ spkm3_make_token(struct spkm3_ctx *ctx, { s32 checksum_type; char tokhdrbuf[25]; - struct xdr_netobj md5cksum = {.len = 0, .data = NULL}; + char cksumdata[16]; + struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata}; struct xdr_netobj mic_hdr = {.len = 0, .data = tokhdrbuf}; int tokenlen = 0; unsigned char *ptr; @@ -115,13 +116,11 @@ spkm3_make_token(struct spkm3_ctx *ctx, dprintk("RPC: gss_spkm3_seal: SPKM_WRAP_TOK not supported\n"); goto out_err; } - kfree(md5cksum.data); /* XXX need to implement sequence numbers, and ctx->expired */ return GSS_S_COMPLETE; out_err: - kfree(md5cksum.data); token->data = NULL; token->len = 0; return GSS_S_FAILURE; diff --git a/net/sunrpc/auth_gss/gss_spkm3_unseal.c b/net/sunrpc/auth_gss/gss_spkm3_unseal.c index 96851b0ba1b..8537f581ef9 100644 --- a/net/sunrpc/auth_gss/gss_spkm3_unseal.c +++ b/net/sunrpc/auth_gss/gss_spkm3_unseal.c @@ -56,7 +56,8 @@ spkm3_read_token(struct spkm3_ctx *ctx, { s32 code; struct xdr_netobj wire_cksum = {.len =0, .data = NULL}; - struct xdr_netobj md5cksum = {.len = 0, .data = NULL}; + char cksumdata[16]; + struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata}; unsigned char *ptr = (unsigned char *)read_token->data; unsigned char *cksum; int bodysize, md5elen; @@ -120,7 +121,6 @@ spkm3_read_token(struct spkm3_ctx *ctx, /* XXX: need to add expiration and sequencing */ ret = GSS_S_COMPLETE; out: - kfree(md5cksum.data); kfree(wire_cksum.data); return ret; } diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c index 23632d84d8d..4d7eb9e704d 100644 --- a/net/sunrpc/auth_gss/svcauth_gss.c +++ b/net/sunrpc/auth_gss/svcauth_gss.c @@ -78,7 +78,8 @@ struct rsi { static struct cache_head *rsi_table[RSI_HASHMAX]; static struct cache_detail rsi_cache; -static struct rsi *rsi_lookup(struct rsi *item, int set); +static struct rsi *rsi_update(struct rsi *new, struct rsi *old); +static struct rsi *rsi_lookup(struct rsi *item); static void rsi_free(struct rsi *rsii) { @@ -88,13 +89,11 @@ static void rsi_free(struct rsi *rsii) kfree(rsii->out_token.data); } -static 
void rsi_put(struct cache_head *item, struct cache_detail *cd) +static void rsi_put(struct kref *ref) { - struct rsi *rsii = container_of(item, struct rsi, h); - if (cache_put(item, cd)) { - rsi_free(rsii); - kfree(rsii); - } + struct rsi *rsii = container_of(ref, struct rsi, h.ref); + rsi_free(rsii); + kfree(rsii); } static inline int rsi_hash(struct rsi *item) @@ -103,8 +102,10 @@ static inline int rsi_hash(struct rsi *item) ^ hash_mem(item->in_token.data, item->in_token.len, RSI_HASHBITS); } -static inline int rsi_match(struct rsi *item, struct rsi *tmp) +static int rsi_match(struct cache_head *a, struct cache_head *b) { + struct rsi *item = container_of(a, struct rsi, h); + struct rsi *tmp = container_of(b, struct rsi, h); return netobj_equal(&item->in_handle, &tmp->in_handle) && netobj_equal(&item->in_token, &tmp->in_token); } @@ -125,8 +126,11 @@ static inline int dup_netobj(struct xdr_netobj *dst, struct xdr_netobj *src) return dup_to_netobj(dst, src->data, src->len); } -static inline void rsi_init(struct rsi *new, struct rsi *item) +static void rsi_init(struct cache_head *cnew, struct cache_head *citem) { + struct rsi *new = container_of(cnew, struct rsi, h); + struct rsi *item = container_of(citem, struct rsi, h); + new->out_handle.data = NULL; new->out_handle.len = 0; new->out_token.data = NULL; @@ -141,8 +145,11 @@ static inline void rsi_init(struct rsi *new, struct rsi *item) item->in_token.data = NULL; } -static inline void rsi_update(struct rsi *new, struct rsi *item) +static void update_rsi(struct cache_head *cnew, struct cache_head *citem) { + struct rsi *new = container_of(cnew, struct rsi, h); + struct rsi *item = container_of(citem, struct rsi, h); + BUG_ON(new->out_handle.data || new->out_token.data); new->out_handle.len = item->out_handle.len; item->out_handle.len = 0; @@ -157,6 +164,15 @@ static inline void rsi_update(struct rsi *new, struct rsi *item) new->minor_status = item->minor_status; } +static struct cache_head *rsi_alloc(void) +{ + struct rsi *rsii = kmalloc(sizeof(*rsii), GFP_KERNEL); + if (rsii) + return &rsii->h; + else + return NULL; +} + static void rsi_request(struct cache_detail *cd, struct cache_head *h, char **bpp, int *blen) @@ -198,6 +214,10 @@ static int rsi_parse(struct cache_detail *cd, if (dup_to_netobj(&rsii.in_token, buf, len)) goto out; + rsip = rsi_lookup(&rsii); + if (!rsip) + goto out; + rsii.h.flags = 0; /* expiry */ expiry = get_expiry(&mesg); @@ -240,12 +260,14 @@ static int rsi_parse(struct cache_detail *cd, goto out; } rsii.h.expiry_time = expiry; - rsip = rsi_lookup(&rsii, 1); + rsip = rsi_update(&rsii, rsip); status = 0; out: rsi_free(&rsii); if (rsip) - rsi_put(&rsip->h, &rsi_cache); + cache_put(&rsip->h, &rsi_cache); + else + status = -ENOMEM; return status; } @@ -257,9 +279,37 @@ static struct cache_detail rsi_cache = { .cache_put = rsi_put, .cache_request = rsi_request, .cache_parse = rsi_parse, + .match = rsi_match, + .init = rsi_init, + .update = update_rsi, + .alloc = rsi_alloc, }; -static DefineSimpleCacheLookup(rsi, 0) +static struct rsi *rsi_lookup(struct rsi *item) +{ + struct cache_head *ch; + int hash = rsi_hash(item); + + ch = sunrpc_cache_lookup(&rsi_cache, &item->h, hash); + if (ch) + return container_of(ch, struct rsi, h); + else + return NULL; +} + +static struct rsi *rsi_update(struct rsi *new, struct rsi *old) +{ + struct cache_head *ch; + int hash = rsi_hash(new); + + ch = sunrpc_cache_update(&rsi_cache, &new->h, + &old->h, hash); + if (ch) + return container_of(ch, struct rsi, h); + else + return NULL; +} + /* 
* The rpcsec_context cache is used to store a context that is @@ -293,7 +343,8 @@ struct rsc { static struct cache_head *rsc_table[RSC_HASHMAX]; static struct cache_detail rsc_cache; -static struct rsc *rsc_lookup(struct rsc *item, int set); +static struct rsc *rsc_update(struct rsc *new, struct rsc *old); +static struct rsc *rsc_lookup(struct rsc *item); static void rsc_free(struct rsc *rsci) { @@ -304,14 +355,12 @@ static void rsc_free(struct rsc *rsci) put_group_info(rsci->cred.cr_group_info); } -static void rsc_put(struct cache_head *item, struct cache_detail *cd) +static void rsc_put(struct kref *ref) { - struct rsc *rsci = container_of(item, struct rsc, h); + struct rsc *rsci = container_of(ref, struct rsc, h.ref); - if (cache_put(item, cd)) { - rsc_free(rsci); - kfree(rsci); - } + rsc_free(rsci); + kfree(rsci); } static inline int @@ -320,15 +369,21 @@ rsc_hash(struct rsc *rsci) return hash_mem(rsci->handle.data, rsci->handle.len, RSC_HASHBITS); } -static inline int -rsc_match(struct rsc *new, struct rsc *tmp) +static int +rsc_match(struct cache_head *a, struct cache_head *b) { + struct rsc *new = container_of(a, struct rsc, h); + struct rsc *tmp = container_of(b, struct rsc, h); + return netobj_equal(&new->handle, &tmp->handle); } -static inline void -rsc_init(struct rsc *new, struct rsc *tmp) +static void +rsc_init(struct cache_head *cnew, struct cache_head *ctmp) { + struct rsc *new = container_of(cnew, struct rsc, h); + struct rsc *tmp = container_of(ctmp, struct rsc, h); + new->handle.len = tmp->handle.len; tmp->handle.len = 0; new->handle.data = tmp->handle.data; @@ -337,9 +392,12 @@ rsc_init(struct rsc *new, struct rsc *tmp) new->cred.cr_group_info = NULL; } -static inline void -rsc_update(struct rsc *new, struct rsc *tmp) +static void +update_rsc(struct cache_head *cnew, struct cache_head *ctmp) { + struct rsc *new = container_of(cnew, struct rsc, h); + struct rsc *tmp = container_of(ctmp, struct rsc, h); + new->mechctx = tmp->mechctx; tmp->mechctx = NULL; memset(&new->seqdata, 0, sizeof(new->seqdata)); @@ -348,6 +406,16 @@ rsc_update(struct rsc *new, struct rsc *tmp) tmp->cred.cr_group_info = NULL; } +static struct cache_head * +rsc_alloc(void) +{ + struct rsc *rsci = kmalloc(sizeof(*rsci), GFP_KERNEL); + if (rsci) + return &rsci->h; + else + return NULL; +} + static int rsc_parse(struct cache_detail *cd, char *mesg, int mlen) { @@ -373,6 +441,10 @@ static int rsc_parse(struct cache_detail *cd, if (expiry == 0) goto out; + rscp = rsc_lookup(&rsci); + if (!rscp) + goto out; + /* uid, or NEGATIVE */ rv = get_int(&mesg, &rsci.cred.cr_uid); if (rv == -EINVAL) @@ -428,12 +500,14 @@ static int rsc_parse(struct cache_detail *cd, gss_mech_put(gm); } rsci.h.expiry_time = expiry; - rscp = rsc_lookup(&rsci, 1); + rscp = rsc_update(&rsci, rscp); status = 0; out: rsc_free(&rsci); if (rscp) - rsc_put(&rscp->h, &rsc_cache); + cache_put(&rscp->h, &rsc_cache); + else + status = -ENOMEM; return status; } @@ -444,9 +518,37 @@ static struct cache_detail rsc_cache = { .name = "auth.rpcsec.context", .cache_put = rsc_put, .cache_parse = rsc_parse, + .match = rsc_match, + .init = rsc_init, + .update = update_rsc, + .alloc = rsc_alloc, }; -static DefineSimpleCacheLookup(rsc, 0); +static struct rsc *rsc_lookup(struct rsc *item) +{ + struct cache_head *ch; + int hash = rsc_hash(item); + + ch = sunrpc_cache_lookup(&rsc_cache, &item->h, hash); + if (ch) + return container_of(ch, struct rsc, h); + else + return NULL; +} + +static struct rsc *rsc_update(struct rsc *new, struct rsc *old) +{ + struct 
cache_head *ch; + int hash = rsc_hash(new); + + ch = sunrpc_cache_update(&rsc_cache, &new->h, + &old->h, hash); + if (ch) + return container_of(ch, struct rsc, h); + else + return NULL; +} + static struct rsc * gss_svc_searchbyctx(struct xdr_netobj *handle) @@ -457,7 +559,7 @@ gss_svc_searchbyctx(struct xdr_netobj *handle) memset(&rsci, 0, sizeof(rsci)); if (dup_to_netobj(&rsci.handle, handle->data, handle->len)) return NULL; - found = rsc_lookup(&rsci, 0); + found = rsc_lookup(&rsci); rsc_free(&rsci); if (!found) return NULL; @@ -645,6 +747,8 @@ find_gss_auth_domain(struct gss_ctx *ctx, u32 svc) return auth_domain_find(name); } +static struct auth_ops svcauthops_gss; + int svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name) { @@ -655,20 +759,18 @@ svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name) new = kmalloc(sizeof(*new), GFP_KERNEL); if (!new) goto out; - cache_init(&new->h.h); + kref_init(&new->h.ref); new->h.name = kmalloc(strlen(name) + 1, GFP_KERNEL); if (!new->h.name) goto out_free_dom; strcpy(new->h.name, name); - new->h.flavour = RPC_AUTH_GSS; + new->h.flavour = &svcauthops_gss; new->pseudoflavor = pseudoflavor; - new->h.h.expiry_time = NEVER; - test = auth_domain_lookup(&new->h, 1); - if (test == &new->h) { - BUG_ON(atomic_dec_and_test(&new->h.h.refcnt)); - } else { /* XXX Duplicate registration? */ + test = auth_domain_lookup(name, &new->h); + if (test != &new->h) { /* XXX Duplicate registration? */ auth_domain_put(&new->h); + /* dangling ref-count... */ goto out; } return 0; @@ -895,7 +997,7 @@ svcauth_gss_accept(struct svc_rqst *rqstp, u32 *authp) goto drop; } - rsip = rsi_lookup(&rsikey, 0); + rsip = rsi_lookup(&rsikey); rsi_free(&rsikey); if (!rsip) { goto drop; @@ -970,7 +1072,7 @@ drop: ret = SVC_DROP; out: if (rsci) - rsc_put(&rsci->h, &rsc_cache); + cache_put(&rsci->h, &rsc_cache); return ret; } @@ -1062,7 +1164,7 @@ out_err: put_group_info(rqstp->rq_cred.cr_group_info); rqstp->rq_cred.cr_group_info = NULL; if (gsd->rsci) - rsc_put(&gsd->rsci->h, &rsc_cache); + cache_put(&gsd->rsci->h, &rsc_cache); gsd->rsci = NULL; return stat; diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c index 0acccfeeb28..3ac4193a78e 100644 --- a/net/sunrpc/cache.c +++ b/net/sunrpc/cache.c @@ -37,16 +37,138 @@ static void cache_defer_req(struct cache_req *req, struct cache_head *item); static void cache_revisit_request(struct cache_head *item); -void cache_init(struct cache_head *h) +static void cache_init(struct cache_head *h) { time_t now = get_seconds(); h->next = NULL; h->flags = 0; - atomic_set(&h->refcnt, 1); + kref_init(&h->ref); h->expiry_time = now + CACHE_NEW_EXPIRY; h->last_refresh = now; } +struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail, + struct cache_head *key, int hash) +{ + struct cache_head **head, **hp; + struct cache_head *new = NULL; + + head = &detail->hash_table[hash]; + + read_lock(&detail->hash_lock); + + for (hp=head; *hp != NULL ; hp = &(*hp)->next) { + struct cache_head *tmp = *hp; + if (detail->match(tmp, key)) { + cache_get(tmp); + read_unlock(&detail->hash_lock); + return tmp; + } + } + read_unlock(&detail->hash_lock); + /* Didn't find anything, insert an empty entry */ + + new = detail->alloc(); + if (!new) + return NULL; + cache_init(new); + + write_lock(&detail->hash_lock); + + /* check if entry appeared while we slept */ + for (hp=head; *hp != NULL ; hp = &(*hp)->next) { + struct cache_head *tmp = *hp; + if (detail->match(tmp, key)) { + cache_get(tmp); + write_unlock(&detail->hash_lock); + cache_put(new, 
detail); + return tmp; + } + } + detail->init(new, key); + new->next = *head; + *head = new; + detail->entries++; + cache_get(new); + write_unlock(&detail->hash_lock); + + return new; +} +EXPORT_SYMBOL(sunrpc_cache_lookup); + + +static void queue_loose(struct cache_detail *detail, struct cache_head *ch); + +static int cache_fresh_locked(struct cache_head *head, time_t expiry) +{ + head->expiry_time = expiry; + head->last_refresh = get_seconds(); + return !test_and_set_bit(CACHE_VALID, &head->flags); +} + +static void cache_fresh_unlocked(struct cache_head *head, + struct cache_detail *detail, int new) +{ + if (new) + cache_revisit_request(head); + if (test_and_clear_bit(CACHE_PENDING, &head->flags)) { + cache_revisit_request(head); + queue_loose(detail, head); + } +} + +struct cache_head *sunrpc_cache_update(struct cache_detail *detail, + struct cache_head *new, struct cache_head *old, int hash) +{ + /* The 'old' entry is to be replaced by 'new'. + * If 'old' is not VALID, we update it directly, + * otherwise we need to replace it + */ + struct cache_head **head; + struct cache_head *tmp; + int is_new; + + if (!test_bit(CACHE_VALID, &old->flags)) { + write_lock(&detail->hash_lock); + if (!test_bit(CACHE_VALID, &old->flags)) { + if (test_bit(CACHE_NEGATIVE, &new->flags)) + set_bit(CACHE_NEGATIVE, &old->flags); + else + detail->update(old, new); + is_new = cache_fresh_locked(old, new->expiry_time); + write_unlock(&detail->hash_lock); + cache_fresh_unlocked(old, detail, is_new); + return old; + } + write_unlock(&detail->hash_lock); + } + /* We need to insert a new entry */ + tmp = detail->alloc(); + if (!tmp) { + cache_put(old, detail); + return NULL; + } + cache_init(tmp); + detail->init(tmp, old); + head = &detail->hash_table[hash]; + + write_lock(&detail->hash_lock); + if (test_bit(CACHE_NEGATIVE, &new->flags)) + set_bit(CACHE_NEGATIVE, &tmp->flags); + else + detail->update(tmp, new); + tmp->next = *head; + *head = tmp; + cache_get(tmp); + is_new = cache_fresh_locked(tmp, new->expiry_time); + cache_fresh_locked(old, 0); + write_unlock(&detail->hash_lock); + cache_fresh_unlocked(tmp, detail, is_new); + cache_fresh_unlocked(old, detail, 0); + cache_put(old, detail); + return tmp; +} +EXPORT_SYMBOL(sunrpc_cache_update); static int cache_make_upcall(struct cache_detail *detail, struct cache_head *h); /* @@ -94,7 +216,8 @@ int cache_check(struct cache_detail *detail, clear_bit(CACHE_PENDING, &h->flags); if (rv == -EAGAIN) { set_bit(CACHE_NEGATIVE, &h->flags); - cache_fresh(detail, h, get_seconds()+CACHE_NEW_EXPIRY); + cache_fresh_unlocked(h, detail, + cache_fresh_locked(h, get_seconds()+CACHE_NEW_EXPIRY)); rv = -ENOENT; } break; @@ -110,25 +233,11 @@ int cache_check(struct cache_detail *detail, if (rv == -EAGAIN) cache_defer_req(rqstp, h); - if (rv && h) - detail->cache_put(h, detail); + if (rv) + cache_put(h, detail); return rv; } -static void queue_loose(struct cache_detail *detail, struct cache_head *ch); - -void cache_fresh(struct cache_detail *detail, - struct cache_head *head, time_t expiry) -{ - - head->expiry_time = expiry; - head->last_refresh = get_seconds(); - if (!test_and_set_bit(CACHE_VALID, &head->flags)) - cache_revisit_request(head); - if (test_and_clear_bit(CACHE_PENDING, &head->flags)) - queue_loose(detail, head); -} - /* * caches need to be periodically cleaned. 
* For this we maintain a list of cache_detail and @@ -322,7 +431,7 @@ static int cache_clean(void) if (test_and_clear_bit(CACHE_PENDING, &ch->flags)) queue_loose(current_detail, ch); - if (atomic_read(&ch->refcnt) == 1) + if (atomic_read(&ch->ref.refcount) == 1) break; } if (ch) { @@ -337,7 +446,7 @@ static int cache_clean(void) current_index ++; spin_unlock(&cache_list_lock); if (ch) - d->cache_put(ch, d); + cache_put(ch, d); } else spin_unlock(&cache_list_lock); @@ -453,7 +562,7 @@ static void cache_defer_req(struct cache_req *req, struct cache_head *item) /* there was one too many */ dreq->revisit(dreq, 1); } - if (test_bit(CACHE_VALID, &item->flags)) { + if (!test_bit(CACHE_PENDING, &item->flags)) { /* must have just been validated... */ cache_revisit_request(item); } @@ -614,7 +723,7 @@ cache_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos) !test_bit(CACHE_PENDING, &rq->item->flags)) { list_del(&rq->q.list); spin_unlock(&queue_lock); - cd->cache_put(rq->item, cd); + cache_put(rq->item, cd); kfree(rq->buf); kfree(rq); } else @@ -794,10 +903,10 @@ static void queue_loose(struct cache_detail *detail, struct cache_head *ch) if (cr->item != ch) continue; if (cr->readers != 0) - break; + continue; list_del(&cr->q.list); spin_unlock(&queue_lock); - detail->cache_put(cr->item, detail); + cache_put(cr->item, detail); kfree(cr->buf); kfree(cr); return; @@ -1082,8 +1191,8 @@ static int c_show(struct seq_file *m, void *p) return cd->cache_show(m, cd, NULL); ifdebug(CACHE) - seq_printf(m, "# expiry=%ld refcnt=%d\n", - cp->expiry_time, atomic_read(&cp->refcnt)); + seq_printf(m, "# expiry=%ld refcnt=%d flags=%lx\n", + cp->expiry_time, atomic_read(&cp->ref.refcount), cp->flags); cache_get(cp); if (cache_check(cd, cp, NULL)) /* cache_check does a cache_put on failure */ diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index d7847978204..aa8965e9d30 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -28,12 +28,11 @@ #include <linux/mm.h> #include <linux/slab.h> #include <linux/utsname.h> +#include <linux/workqueue.h> #include <linux/sunrpc/clnt.h> -#include <linux/workqueue.h> #include <linux/sunrpc/rpc_pipe_fs.h> - -#include <linux/nfs.h> +#include <linux/sunrpc/metrics.h> #define RPC_SLACK_SPACE (1024) /* total overkill */ @@ -71,8 +70,15 @@ rpc_setup_pipedir(struct rpc_clnt *clnt, char *dir_name) static uint32_t clntid; int error; + clnt->cl_vfsmnt = ERR_PTR(-ENOENT); + clnt->cl_dentry = ERR_PTR(-ENOENT); if (dir_name == NULL) return 0; + + clnt->cl_vfsmnt = rpc_get_mount(); + if (IS_ERR(clnt->cl_vfsmnt)) + return PTR_ERR(clnt->cl_vfsmnt); + for (;;) { snprintf(clnt->cl_pathname, sizeof(clnt->cl_pathname), "%s/clnt%x", dir_name, @@ -85,6 +91,7 @@ rpc_setup_pipedir(struct rpc_clnt *clnt, char *dir_name) if (error != -EEXIST) { printk(KERN_INFO "RPC: Couldn't create pipefs entry %s, error %d\n", clnt->cl_pathname, error); + rpc_put_mount(); return error; } } @@ -147,6 +154,7 @@ rpc_new_client(struct rpc_xprt *xprt, char *servname, clnt->cl_vers = version->number; clnt->cl_prot = xprt->prot; clnt->cl_stats = program->stats; + clnt->cl_metrics = rpc_alloc_iostats(clnt); rpc_init_wait_queue(&clnt->cl_pmap_default.pm_bindwait, "bindwait"); if (!clnt->cl_port) @@ -175,7 +183,11 @@ rpc_new_client(struct rpc_xprt *xprt, char *servname, return clnt; out_no_auth: - rpc_rmdir(clnt->cl_pathname); + if (!IS_ERR(clnt->cl_dentry)) { + rpc_rmdir(clnt->cl_pathname); + dput(clnt->cl_dentry); + rpc_put_mount(); + } out_no_path: if (clnt->cl_server != clnt->cl_inline_name) 
kfree(clnt->cl_server); @@ -240,11 +252,15 @@ rpc_clone_client(struct rpc_clnt *clnt) new->cl_autobind = 0; new->cl_oneshot = 0; new->cl_dead = 0; + if (!IS_ERR(new->cl_dentry)) { + dget(new->cl_dentry); + rpc_get_mount(); + } rpc_init_rtt(&new->cl_rtt_default, clnt->cl_xprt->timeout.to_initval); if (new->cl_auth) atomic_inc(&new->cl_auth->au_count); new->cl_pmap = &new->cl_pmap_default; - rpc_init_wait_queue(&new->cl_pmap_default.pm_bindwait, "bindwait"); + new->cl_metrics = rpc_alloc_iostats(clnt); return new; out_no_clnt: printk(KERN_INFO "RPC: out of memory in %s\n", __FUNCTION__); @@ -314,6 +330,12 @@ rpc_destroy_client(struct rpc_clnt *clnt) if (clnt->cl_server != clnt->cl_inline_name) kfree(clnt->cl_server); out_free: + rpc_free_iostats(clnt->cl_metrics); + clnt->cl_metrics = NULL; + if (!IS_ERR(clnt->cl_dentry)) { + dput(clnt->cl_dentry); + rpc_put_mount(); + } kfree(clnt); return 0; } @@ -473,15 +495,16 @@ rpc_call_async(struct rpc_clnt *clnt, struct rpc_message *msg, int flags, int status; /* If this client is slain all further I/O fails */ + status = -EIO; if (clnt->cl_dead) - return -EIO; + goto out_release; flags |= RPC_TASK_ASYNC; /* Create/initialize a new RPC task */ status = -ENOMEM; if (!(task = rpc_new_task(clnt, flags, tk_ops, data))) - goto out; + goto out_release; /* Mask signals on GSS_AUTH upcalls */ rpc_task_sigmask(task, &oldset); @@ -496,7 +519,10 @@ rpc_call_async(struct rpc_clnt *clnt, struct rpc_message *msg, int flags, rpc_release_task(task); rpc_restore_sigmask(&oldset); -out: + return status; +out_release: + if (tk_ops->rpc_release != NULL) + tk_ops->rpc_release(data); return status; } @@ -993,6 +1019,8 @@ call_timeout(struct rpc_task *task) } dprintk("RPC: %4d call_timeout (major)\n", task->tk_pid); + task->tk_timeouts++; + if (RPC_IS_SOFT(task)) { printk(KERN_NOTICE "%s: server %s not responding, timed out\n", clnt->cl_protname, clnt->cl_server); @@ -1045,6 +1073,11 @@ call_decode(struct rpc_task *task) return; } + /* + * Ensure that we see all writes made by xprt_complete_rqst() + * before it changed req->rq_received. + */ + smp_rmb(); req->rq_rcv_buf.len = req->rq_private_buf.len; /* Check that the softirq receive buffer is valid */ @@ -1194,8 +1227,8 @@ call_verify(struct rpc_task *task) task->tk_action = call_bind; goto out_retry; case RPC_AUTH_TOOWEAK: - printk(KERN_NOTICE "call_verify: server requires stronger " - "authentication.\n"); + printk(KERN_NOTICE "call_verify: server %s requires stronger " + "authentication.\n", task->tk_client->cl_server); break; default: printk(KERN_WARNING "call_verify: unknown auth error: %x\n", n); diff --git a/net/sunrpc/pmap_clnt.c b/net/sunrpc/pmap_clnt.c index 8139ce68e91..d25b054ec92 100644 --- a/net/sunrpc/pmap_clnt.c +++ b/net/sunrpc/pmap_clnt.c @@ -82,6 +82,7 @@ rpc_getport(struct rpc_task *task, struct rpc_clnt *clnt) rpc_call_setup(child, &msg, 0); /* ... 
and run the child task */ + task->tk_xprt->stat.bind_count++; rpc_run_child(task, child, pmap_getport_done); return; @@ -103,6 +104,11 @@ rpc_getport_external(struct sockaddr_in *sin, __u32 prog, __u32 vers, int prot) .pm_prot = prot, .pm_port = 0 }; + struct rpc_message msg = { + .rpc_proc = &pmap_procedures[PMAP_GETPORT], + .rpc_argp = &map, + .rpc_resp = &map.pm_port, + }; struct rpc_clnt *pmap_clnt; char hostname[32]; int status; @@ -116,7 +122,7 @@ rpc_getport_external(struct sockaddr_in *sin, __u32 prog, __u32 vers, int prot) return PTR_ERR(pmap_clnt); /* Setup the call info struct */ - status = rpc_call(pmap_clnt, PMAP_GETPORT, &map, &map.pm_port, 0); + status = rpc_call_sync(pmap_clnt, &msg, 0); if (status >= 0) { if (map.pm_port != 0) @@ -161,16 +167,27 @@ pmap_getport_done(struct rpc_task *task) int rpc_register(u32 prog, u32 vers, int prot, unsigned short port, int *okay) { - struct sockaddr_in sin; - struct rpc_portmap map; + struct sockaddr_in sin = { + .sin_family = AF_INET, + .sin_addr.s_addr = htonl(INADDR_LOOPBACK), + }; + struct rpc_portmap map = { + .pm_prog = prog, + .pm_vers = vers, + .pm_prot = prot, + .pm_port = port, + }; + struct rpc_message msg = { + .rpc_proc = &pmap_procedures[port ? PMAP_SET : PMAP_UNSET], + .rpc_argp = &map, + .rpc_resp = okay, + }; struct rpc_clnt *pmap_clnt; int error = 0; dprintk("RPC: registering (%d, %d, %d, %d) with portmapper.\n", prog, vers, prot, port); - sin.sin_family = AF_INET; - sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK); pmap_clnt = pmap_create("localhost", &sin, IPPROTO_UDP, 1); if (IS_ERR(pmap_clnt)) { error = PTR_ERR(pmap_clnt); @@ -178,13 +195,7 @@ rpc_register(u32 prog, u32 vers, int prot, unsigned short port, int *okay) return error; } - map.pm_prog = prog; - map.pm_vers = vers; - map.pm_prot = prot; - map.pm_port = port; - - error = rpc_call(pmap_clnt, port? 
PMAP_SET : PMAP_UNSET, - &map, okay, 0); + error = rpc_call_sync(pmap_clnt, &msg, 0); if (error < 0) { printk(KERN_WARNING @@ -260,6 +271,8 @@ static struct rpc_procinfo pmap_procedures[] = { .p_decode = (kxdrproc_t) xdr_decode_bool, .p_bufsiz = 4, .p_count = 1, + .p_statidx = PMAP_SET, + .p_name = "SET", }, [PMAP_UNSET] = { .p_proc = PMAP_UNSET, @@ -267,6 +280,8 @@ static struct rpc_procinfo pmap_procedures[] = { .p_decode = (kxdrproc_t) xdr_decode_bool, .p_bufsiz = 4, .p_count = 1, + .p_statidx = PMAP_UNSET, + .p_name = "UNSET", }, [PMAP_GETPORT] = { .p_proc = PMAP_GETPORT, @@ -274,6 +289,8 @@ static struct rpc_procinfo pmap_procedures[] = { .p_decode = (kxdrproc_t) xdr_decode_port, .p_bufsiz = 4, .p_count = 1, + .p_statidx = PMAP_GETPORT, + .p_name = "GETPORT", }, }; diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index ad9d9fc4e73..cc673dd8433 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c @@ -91,7 +91,8 @@ rpc_queue_upcall(struct inode *inode, struct rpc_pipe_msg *msg) res = 0; } else if (rpci->flags & RPC_PIPE_WAIT_FOR_OPEN) { if (list_empty(&rpci->pipe)) - schedule_delayed_work(&rpci->queue_timeout, + queue_delayed_work(rpciod_workqueue, + &rpci->queue_timeout, RPC_UPCALL_TIMEOUT); list_add_tail(&msg->list, &rpci->pipe); rpci->pipelen += msg->len; @@ -132,7 +133,7 @@ rpc_close_pipes(struct inode *inode) if (ops->release_pipe) ops->release_pipe(inode); cancel_delayed_work(&rpci->queue_timeout); - flush_scheduled_work(); + flush_workqueue(rpciod_workqueue); } rpc_inode_setowner(inode, NULL); mutex_unlock(&inode->i_mutex); @@ -394,7 +395,7 @@ enum { */ struct rpc_filelist { char *name; - struct file_operations *i_fop; + const struct file_operations *i_fop; int mode; }; @@ -434,14 +435,17 @@ static struct rpc_filelist authfiles[] = { }, }; -static int -rpc_get_mount(void) +struct vfsmount *rpc_get_mount(void) { - return simple_pin_fs("rpc_pipefs", &rpc_mount, &rpc_mount_count); + int err; + + err = simple_pin_fs("rpc_pipefs", &rpc_mount, &rpc_mount_count); + if (err != 0) + return ERR_PTR(err); + return rpc_mount; } -static void -rpc_put_mount(void) +void rpc_put_mount(void) { simple_release_fs(&rpc_mount, &rpc_mount_count); } @@ -451,12 +455,13 @@ rpc_lookup_parent(char *path, struct nameidata *nd) { if (path[0] == '\0') return -ENOENT; - if (rpc_get_mount()) { + nd->mnt = rpc_get_mount(); + if (IS_ERR(nd->mnt)) { printk(KERN_WARNING "%s: %s failed to mount " "pseudofilesystem \n", __FILE__, __FUNCTION__); - return -ENODEV; + return PTR_ERR(nd->mnt); } - nd->mnt = mntget(rpc_mount); + mntget(nd->mnt); nd->dentry = dget(rpc_mount->mnt_root); nd->last_type = LAST_ROOT; nd->flags = LOOKUP_PARENT; @@ -593,7 +598,6 @@ __rpc_mkdir(struct inode *dir, struct dentry *dentry) d_instantiate(dentry, inode); dir->i_nlink++; inode_dir_notify(dir, DN_CREATE); - rpc_get_mount(); return 0; out_err: printk(KERN_WARNING "%s: %s failed to allocate inode for dentry %s\n", @@ -614,7 +618,6 @@ __rpc_rmdir(struct inode *dir, struct dentry *dentry) if (!error) { inode_dir_notify(dir, DN_DELETE); d_drop(dentry); - rpc_put_mount(); } return 0; } @@ -668,7 +671,7 @@ rpc_mkdir(char *path, struct rpc_clnt *rpc_client) out: mutex_unlock(&dir->i_mutex); rpc_release_path(&nd); - return dentry; + return dget(dentry); err_depopulate: rpc_depopulate(dentry); __rpc_rmdir(dir, dentry); @@ -732,7 +735,7 @@ rpc_mkpipe(char *path, void *private, struct rpc_pipe_ops *ops, int flags) out: mutex_unlock(&dir->i_mutex); rpc_release_path(&nd); - return dentry; + return dget(dentry); err_dput: 
dput(dentry); dentry = ERR_PTR(-ENOMEM); diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index dff07795bd1..5c3eee76850 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c @@ -65,7 +65,7 @@ static LIST_HEAD(all_tasks); */ static DEFINE_MUTEX(rpciod_mutex); static unsigned int rpciod_users; -static struct workqueue_struct *rpciod_workqueue; +struct workqueue_struct *rpciod_workqueue; /* * Spinlock for other critical sections of code. @@ -182,6 +182,7 @@ static void __rpc_add_wait_queue(struct rpc_wait_queue *queue, struct rpc_task * else list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]); task->u.tk_wait.rpc_waitq = queue; + queue->qlen++; rpc_set_queued(task); dprintk("RPC: %4d added to queue %p \"%s\"\n", @@ -216,6 +217,7 @@ static void __rpc_remove_wait_queue(struct rpc_task *task) __rpc_remove_wait_queue_priority(task); else list_del(&task->u.tk_wait.list); + queue->qlen--; dprintk("RPC: %4d removed from queue %p \"%s\"\n", task->tk_pid, queue, rpc_qname(queue)); } @@ -816,6 +818,9 @@ void rpc_init_task(struct rpc_task *task, struct rpc_clnt *clnt, int flags, cons BUG_ON(task->tk_ops == NULL); + /* starting timestamp */ + task->tk_start = jiffies; + dprintk("RPC: %4d new task procpid %d\n", task->tk_pid, current->pid); } @@ -917,8 +922,11 @@ struct rpc_task *rpc_run_task(struct rpc_clnt *clnt, int flags, { struct rpc_task *task; task = rpc_new_task(clnt, flags, ops, data); - if (task == NULL) + if (task == NULL) { + if (ops->rpc_release != NULL) + ops->rpc_release(data); return ERR_PTR(-ENOMEM); + } atomic_inc(&task->tk_count); rpc_execute(task); return task; @@ -1159,16 +1167,12 @@ rpc_init_mempool(void) NULL, NULL); if (!rpc_buffer_slabp) goto err_nomem; - rpc_task_mempool = mempool_create(RPC_TASK_POOLSIZE, - mempool_alloc_slab, - mempool_free_slab, - rpc_task_slabp); + rpc_task_mempool = mempool_create_slab_pool(RPC_TASK_POOLSIZE, + rpc_task_slabp); if (!rpc_task_mempool) goto err_nomem; - rpc_buffer_mempool = mempool_create(RPC_BUFFER_POOLSIZE, - mempool_alloc_slab, - mempool_free_slab, - rpc_buffer_slabp); + rpc_buffer_mempool = mempool_create_slab_pool(RPC_BUFFER_POOLSIZE, + rpc_buffer_slabp); if (!rpc_buffer_mempool) goto err_nomem; return 0; diff --git a/net/sunrpc/stats.c b/net/sunrpc/stats.c index 4979f226e28..dea529666d6 100644 --- a/net/sunrpc/stats.c +++ b/net/sunrpc/stats.c @@ -21,6 +21,7 @@ #include <linux/seq_file.h> #include <linux/sunrpc/clnt.h> #include <linux/sunrpc/svcsock.h> +#include <linux/sunrpc/metrics.h> #define RPCDBG_FACILITY RPCDBG_MISC @@ -106,11 +107,125 @@ void svc_seq_show(struct seq_file *seq, const struct svc_stat *statp) { } } +/** + * rpc_alloc_iostats - allocate an rpc_iostats structure + * @clnt: RPC program, version, and xprt + * + */ +struct rpc_iostats *rpc_alloc_iostats(struct rpc_clnt *clnt) +{ + unsigned int ops = clnt->cl_maxproc; + size_t size = ops * sizeof(struct rpc_iostats); + struct rpc_iostats *new; + + new = kmalloc(size, GFP_KERNEL); + if (new) + memset(new, 0 , size); + return new; +} +EXPORT_SYMBOL(rpc_alloc_iostats); + +/** + * rpc_free_iostats - release an rpc_iostats structure + * @stats: doomed rpc_iostats structure + * + */ +void rpc_free_iostats(struct rpc_iostats *stats) +{ + kfree(stats); +} +EXPORT_SYMBOL(rpc_free_iostats); + +/** + * rpc_count_iostats - tally up per-task stats + * @task: completed rpc_task + * + * Relies on the caller for serialization. 
+ */ +void rpc_count_iostats(struct rpc_task *task) +{ + struct rpc_rqst *req = task->tk_rqstp; + struct rpc_iostats *stats = task->tk_client->cl_metrics; + struct rpc_iostats *op_metrics; + long rtt, execute, queue; + + if (!stats || !req) + return; + op_metrics = &stats[task->tk_msg.rpc_proc->p_statidx]; + + op_metrics->om_ops++; + op_metrics->om_ntrans += req->rq_ntrans; + op_metrics->om_timeouts += task->tk_timeouts; + + op_metrics->om_bytes_sent += task->tk_bytes_sent; + op_metrics->om_bytes_recv += req->rq_received; + + queue = (long)req->rq_xtime - task->tk_start; + if (queue < 0) + queue = -queue; + op_metrics->om_queue += queue; + + rtt = task->tk_rtt; + if (rtt < 0) + rtt = -rtt; + op_metrics->om_rtt += rtt; + + execute = (long)jiffies - task->tk_start; + if (execute < 0) + execute = -execute; + op_metrics->om_execute += execute; +} + +void _print_name(struct seq_file *seq, unsigned int op, struct rpc_procinfo *procs) +{ + if (procs[op].p_name) + seq_printf(seq, "\t%12s: ", procs[op].p_name); + else if (op == 0) + seq_printf(seq, "\t NULL: "); + else + seq_printf(seq, "\t%12u: ", op); +} + +#define MILLISECS_PER_JIFFY (1000 / HZ) + +void rpc_print_iostats(struct seq_file *seq, struct rpc_clnt *clnt) +{ + struct rpc_iostats *stats = clnt->cl_metrics; + struct rpc_xprt *xprt = clnt->cl_xprt; + unsigned int op, maxproc = clnt->cl_maxproc; + + if (!stats) + return; + + seq_printf(seq, "\tRPC iostats version: %s ", RPC_IOSTATS_VERS); + seq_printf(seq, "p/v: %u/%u (%s)\n", + clnt->cl_prog, clnt->cl_vers, clnt->cl_protname); + + if (xprt) + xprt->ops->print_stats(xprt, seq); + + seq_printf(seq, "\tper-op statistics\n"); + for (op = 0; op < maxproc; op++) { + struct rpc_iostats *metrics = &stats[op]; + _print_name(seq, op, clnt->cl_procinfo); + seq_printf(seq, "%lu %lu %lu %Lu %Lu %Lu %Lu %Lu\n", + metrics->om_ops, + metrics->om_ntrans, + metrics->om_timeouts, + metrics->om_bytes_sent, + metrics->om_bytes_recv, + metrics->om_queue * MILLISECS_PER_JIFFY, + metrics->om_rtt * MILLISECS_PER_JIFFY, + metrics->om_execute * MILLISECS_PER_JIFFY); + } +} +EXPORT_SYMBOL(rpc_print_iostats); + /* * Register/unregister RPC proc files */ static inline struct proc_dir_entry * -do_register(const char *name, void *data, struct file_operations *fops) +do_register(const char *name, void *data, const struct file_operations *fops) { struct proc_dir_entry *ent; @@ -138,7 +253,7 @@ rpc_proc_unregister(const char *name) } struct proc_dir_entry * -svc_proc_register(struct svc_stat *statp, struct file_operations *fops) +svc_proc_register(struct svc_stat *statp, const struct file_operations *fops) { return do_register(statp->program->pg_name, statp, fops); } diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c index 9f737320359..769114f0f88 100644 --- a/net/sunrpc/sunrpc_syms.c +++ b/net/sunrpc/sunrpc_syms.c @@ -105,8 +105,6 @@ EXPORT_SYMBOL(auth_unix_lookup); EXPORT_SYMBOL(cache_check); EXPORT_SYMBOL(cache_flush); EXPORT_SYMBOL(cache_purge); -EXPORT_SYMBOL(cache_fresh); -EXPORT_SYMBOL(cache_init); EXPORT_SYMBOL(cache_register); EXPORT_SYMBOL(cache_unregister); EXPORT_SYMBOL(qword_add); @@ -142,6 +140,7 @@ EXPORT_SYMBOL(nlm_debug); extern int register_rpc_pipefs(void); extern void unregister_rpc_pipefs(void); +extern struct cache_detail ip_map_cache; static int __init init_sunrpc(void) @@ -158,7 +157,6 @@ init_sunrpc(void) #ifdef CONFIG_PROC_FS rpc_proc_init(); #endif - cache_register(&auth_domain_cache); cache_register(&ip_map_cache); out: return err; @@ -169,8 +167,6 @@ cleanup_sunrpc(void) { 
unregister_rpc_pipefs(); rpc_destroy_mempool(); - if (cache_unregister(&auth_domain_cache)) - printk(KERN_ERR "sunrpc: failed to unregister auth_domain cache\n"); if (cache_unregister(&ip_map_cache)) printk(KERN_ERR "sunrpc: failed to unregister ip_map cache\n"); #ifdef RPC_DEBUG diff --git a/net/sunrpc/svcauth.c b/net/sunrpc/svcauth.c index dda4f0c6351..5b28c617680 100644 --- a/net/sunrpc/svcauth.c +++ b/net/sunrpc/svcauth.c @@ -106,112 +106,56 @@ svc_auth_unregister(rpc_authflavor_t flavor) EXPORT_SYMBOL(svc_auth_unregister); /************************************************** - * cache for domain name to auth_domain - * Entries are only added by flavours which will normally - * have a structure that 'inherits' from auth_domain. - * e.g. when an IP -> domainname is given to auth_unix, - * and the domain name doesn't exist, it will create a - * auth_unix_domain and add it to this hash table. - * If it finds the name does exist, but isn't AUTH_UNIX, - * it will complain. + * 'auth_domains' are stored in a hash table indexed by name. + * When the last reference to an 'auth_domain' is dropped, + * the object is unhashed and freed. + * If auth_domain_lookup fails to find an entry, it will return + * it's second argument 'new'. If this is non-null, it will + * have been atomically linked into the table. */ -/* - * Auth auth_domain cache is somewhat different to other caches, - * largely because the entries are possibly of different types: - * each auth flavour has it's own type. - * One consequence of this that DefineCacheLookup cannot - * allocate a new structure as it cannot know the size. - * Notice that the "INIT" code fragment is quite different - * from other caches. When auth_domain_lookup might be - * creating a new domain, the new domain is passed in - * complete and it is used as-is rather than being copied into - * another structure. 
- */ #define DN_HASHBITS 6 #define DN_HASHMAX (1<<DN_HASHBITS) #define DN_HASHMASK (DN_HASHMAX-1) -static struct cache_head *auth_domain_table[DN_HASHMAX]; - -static void auth_domain_drop(struct cache_head *item, struct cache_detail *cd) -{ - struct auth_domain *dom = container_of(item, struct auth_domain, h); - if (cache_put(item,cd)) - authtab[dom->flavour]->domain_release(dom); -} - - -struct cache_detail auth_domain_cache = { - .owner = THIS_MODULE, - .hash_size = DN_HASHMAX, - .hash_table = auth_domain_table, - .name = "auth.domain", - .cache_put = auth_domain_drop, -}; +static struct hlist_head auth_domain_table[DN_HASHMAX]; +static spinlock_t auth_domain_lock = SPIN_LOCK_UNLOCKED; void auth_domain_put(struct auth_domain *dom) { - auth_domain_drop(&dom->h, &auth_domain_cache); -} - -static inline int auth_domain_hash(struct auth_domain *item) -{ - return hash_str(item->name, DN_HASHBITS); -} -static inline int auth_domain_match(struct auth_domain *tmp, struct auth_domain *item) -{ - return strcmp(tmp->name, item->name) == 0; + if (atomic_dec_and_lock(&dom->ref.refcount, &auth_domain_lock)) { + hlist_del(&dom->hash); + dom->flavour->domain_release(dom); + } } struct auth_domain * -auth_domain_lookup(struct auth_domain *item, int set) +auth_domain_lookup(char *name, struct auth_domain *new) { - struct auth_domain *tmp = NULL; - struct cache_head **hp, **head; - head = &auth_domain_cache.hash_table[auth_domain_hash(item)]; - - if (set) - write_lock(&auth_domain_cache.hash_lock); - else - read_lock(&auth_domain_cache.hash_lock); - for (hp=head; *hp != NULL; hp = &tmp->h.next) { - tmp = container_of(*hp, struct auth_domain, h); - if (!auth_domain_match(tmp, item)) - continue; - if (!set) { - cache_get(&tmp->h); - goto out_noset; + struct auth_domain *hp; + struct hlist_head *head; + struct hlist_node *np; + + head = &auth_domain_table[hash_str(name, DN_HASHBITS)]; + + spin_lock(&auth_domain_lock); + + hlist_for_each_entry(hp, np, head, hash) { + if (strcmp(hp->name, name)==0) { + kref_get(&hp->ref); + spin_unlock(&auth_domain_lock); + return hp; } - *hp = tmp->h.next; - tmp->h.next = NULL; - auth_domain_drop(&tmp->h, &auth_domain_cache); - goto out_set; } - /* Didn't find anything */ - if (!set) - goto out_nada; - auth_domain_cache.entries++; -out_set: - item->h.next = *head; - *head = &item->h; - cache_get(&item->h); - write_unlock(&auth_domain_cache.hash_lock); - cache_fresh(&auth_domain_cache, &item->h, item->h.expiry_time); - cache_get(&item->h); - return item; -out_nada: - tmp = NULL; -out_noset: - read_unlock(&auth_domain_cache.hash_lock); - return tmp; + if (new) { + hlist_add_head(&new->hash, head); + kref_get(&new->ref); + } + spin_unlock(&auth_domain_lock); + return new; } struct auth_domain *auth_domain_find(char *name) { - struct auth_domain *rv, ad; - - ad.name = name; - rv = auth_domain_lookup(&ad, 0); - return rv; + return auth_domain_lookup(name, NULL); } diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c index 3e6c694bbad..7e5707e2d6b 100644 --- a/net/sunrpc/svcauth_unix.c +++ b/net/sunrpc/svcauth_unix.c @@ -27,41 +27,35 @@ struct unix_domain { /* other stuff later */ }; +extern struct auth_ops svcauth_unix; + struct auth_domain *unix_domain_find(char *name) { - struct auth_domain *rv, ud; - struct unix_domain *new; - - ud.name = name; - - rv = auth_domain_lookup(&ud, 0); - - foundit: - if (rv && rv->flavour != RPC_AUTH_UNIX) { - auth_domain_put(rv); - return NULL; - } - if (rv) - return rv; - - new = kmalloc(sizeof(*new), GFP_KERNEL); - if (new == NULL) - 
return NULL; - cache_init(&new->h.h); - new->h.name = kstrdup(name, GFP_KERNEL); - new->h.flavour = RPC_AUTH_UNIX; - new->addr_changes = 0; - new->h.h.expiry_time = NEVER; - - rv = auth_domain_lookup(&new->h, 2); - if (rv == &new->h) { - if (atomic_dec_and_test(&new->h.h.refcnt)) BUG(); - } else { - auth_domain_put(&new->h); - goto foundit; + struct auth_domain *rv; + struct unix_domain *new = NULL; + + rv = auth_domain_lookup(name, NULL); + while(1) { + if (rv) { + if (new && rv != &new->h) + auth_domain_put(&new->h); + + if (rv->flavour != &svcauth_unix) { + auth_domain_put(rv); + return NULL; + } + return rv; + } + + new = kmalloc(sizeof(*new), GFP_KERNEL); + if (new == NULL) + return NULL; + kref_init(&new->h.ref); + new->h.name = kstrdup(name, GFP_KERNEL); + new->h.flavour = &svcauth_unix; + new->addr_changes = 0; + rv = auth_domain_lookup(name, &new->h); } - - return rv; } static void svcauth_unix_domain_release(struct auth_domain *dom) @@ -90,15 +84,15 @@ struct ip_map { }; static struct cache_head *ip_table[IP_HASHMAX]; -static void ip_map_put(struct cache_head *item, struct cache_detail *cd) +static void ip_map_put(struct kref *kref) { + struct cache_head *item = container_of(kref, struct cache_head, ref); struct ip_map *im = container_of(item, struct ip_map,h); - if (cache_put(item, cd)) { - if (test_bit(CACHE_VALID, &item->flags) && - !test_bit(CACHE_NEGATIVE, &item->flags)) - auth_domain_put(&im->m_client->h); - kfree(im); - } + + if (test_bit(CACHE_VALID, &item->flags) && + !test_bit(CACHE_NEGATIVE, &item->flags)) + auth_domain_put(&im->m_client->h); + kfree(im); } #if IP_HASHBITS == 8 @@ -112,28 +106,38 @@ static inline int hash_ip(unsigned long ip) return (hash ^ (hash>>8)) & 0xff; } #endif - -static inline int ip_map_hash(struct ip_map *item) -{ - return hash_str(item->m_class, IP_HASHBITS) ^ - hash_ip((unsigned long)item->m_addr.s_addr); -} -static inline int ip_map_match(struct ip_map *item, struct ip_map *tmp) +static int ip_map_match(struct cache_head *corig, struct cache_head *cnew) { - return strcmp(tmp->m_class, item->m_class) == 0 - && tmp->m_addr.s_addr == item->m_addr.s_addr; + struct ip_map *orig = container_of(corig, struct ip_map, h); + struct ip_map *new = container_of(cnew, struct ip_map, h); + return strcmp(orig->m_class, new->m_class) == 0 + && orig->m_addr.s_addr == new->m_addr.s_addr; } -static inline void ip_map_init(struct ip_map *new, struct ip_map *item) +static void ip_map_init(struct cache_head *cnew, struct cache_head *citem) { + struct ip_map *new = container_of(cnew, struct ip_map, h); + struct ip_map *item = container_of(citem, struct ip_map, h); + strcpy(new->m_class, item->m_class); new->m_addr.s_addr = item->m_addr.s_addr; } -static inline void ip_map_update(struct ip_map *new, struct ip_map *item) +static void update(struct cache_head *cnew, struct cache_head *citem) { - cache_get(&item->m_client->h.h); + struct ip_map *new = container_of(cnew, struct ip_map, h); + struct ip_map *item = container_of(citem, struct ip_map, h); + + kref_get(&item->m_client->h.ref); new->m_client = item->m_client; new->m_add_change = item->m_add_change; } +static struct cache_head *ip_map_alloc(void) +{ + struct ip_map *i = kmalloc(sizeof(*i), GFP_KERNEL); + if (i) + return &i->h; + else + return NULL; +} static void ip_map_request(struct cache_detail *cd, struct cache_head *h, @@ -154,7 +158,8 @@ static void ip_map_request(struct cache_detail *cd, (*bpp)[-1] = '\n'; } -static struct ip_map *ip_map_lookup(struct ip_map *, int); +static struct ip_map 
*ip_map_lookup(char *class, struct in_addr addr); +static int ip_map_update(struct ip_map *ipm, struct unix_domain *udom, time_t expiry); static int ip_map_parse(struct cache_detail *cd, char *mesg, int mlen) @@ -166,7 +171,11 @@ static int ip_map_parse(struct cache_detail *cd, int len; int b1,b2,b3,b4; char c; - struct ip_map ipm, *ipmp; + char class[8]; + struct in_addr addr; + int err; + + struct ip_map *ipmp; struct auth_domain *dom; time_t expiry; @@ -175,7 +184,7 @@ static int ip_map_parse(struct cache_detail *cd, mesg[mlen-1] = 0; /* class */ - len = qword_get(&mesg, ipm.m_class, sizeof(ipm.m_class)); + len = qword_get(&mesg, class, sizeof(class)); if (len <= 0) return -EINVAL; /* ip address */ @@ -200,25 +209,22 @@ static int ip_map_parse(struct cache_detail *cd, } else dom = NULL; - ipm.m_addr.s_addr = + addr.s_addr = htonl((((((b1<<8)|b2)<<8)|b3)<<8)|b4); - ipm.h.flags = 0; - if (dom) { - ipm.m_client = container_of(dom, struct unix_domain, h); - ipm.m_add_change = ipm.m_client->addr_changes; + + ipmp = ip_map_lookup(class,addr); + if (ipmp) { + err = ip_map_update(ipmp, + container_of(dom, struct unix_domain, h), + expiry); } else - set_bit(CACHE_NEGATIVE, &ipm.h.flags); - ipm.h.expiry_time = expiry; + err = -ENOMEM; - ipmp = ip_map_lookup(&ipm, 1); - if (ipmp) - ip_map_put(&ipmp->h, &ip_map_cache); if (dom) auth_domain_put(dom); - if (!ipmp) - return -ENOMEM; + cache_flush(); - return 0; + return err; } static int ip_map_show(struct seq_file *m, @@ -262,32 +268,70 @@ struct cache_detail ip_map_cache = { .cache_request = ip_map_request, .cache_parse = ip_map_parse, .cache_show = ip_map_show, + .match = ip_map_match, + .init = ip_map_init, + .update = update, + .alloc = ip_map_alloc, }; -static DefineSimpleCacheLookup(ip_map, 0) +static struct ip_map *ip_map_lookup(char *class, struct in_addr addr) +{ + struct ip_map ip; + struct cache_head *ch; + + strcpy(ip.m_class, class); + ip.m_addr = addr; + ch = sunrpc_cache_lookup(&ip_map_cache, &ip.h, + hash_str(class, IP_HASHBITS) ^ + hash_ip((unsigned long)addr.s_addr)); + + if (ch) + return container_of(ch, struct ip_map, h); + else + return NULL; +} +static int ip_map_update(struct ip_map *ipm, struct unix_domain *udom, time_t expiry) +{ + struct ip_map ip; + struct cache_head *ch; + + ip.m_client = udom; + ip.h.flags = 0; + if (!udom) + set_bit(CACHE_NEGATIVE, &ip.h.flags); + else { + ip.m_add_change = udom->addr_changes; + /* if this is from the legacy set_client system call, + * we need m_add_change to be one higher + */ + if (expiry == NEVER) + ip.m_add_change++; + } + ip.h.expiry_time = expiry; + ch = sunrpc_cache_update(&ip_map_cache, + &ip.h, &ipm->h, + hash_str(ipm->m_class, IP_HASHBITS) ^ + hash_ip((unsigned long)ipm->m_addr.s_addr)); + if (!ch) + return -ENOMEM; + cache_put(ch, &ip_map_cache); + return 0; +} int auth_unix_add_addr(struct in_addr addr, struct auth_domain *dom) { struct unix_domain *udom; - struct ip_map ip, *ipmp; + struct ip_map *ipmp; - if (dom->flavour != RPC_AUTH_UNIX) + if (dom->flavour != &svcauth_unix) return -EINVAL; udom = container_of(dom, struct unix_domain, h); - strcpy(ip.m_class, "nfsd"); - ip.m_addr = addr; - ip.m_client = udom; - ip.m_add_change = udom->addr_changes+1; - ip.h.flags = 0; - ip.h.expiry_time = NEVER; - - ipmp = ip_map_lookup(&ip, 1); + ipmp = ip_map_lookup("nfsd", addr); - if (ipmp) { - ip_map_put(&ipmp->h, &ip_map_cache); - return 0; - } else + if (ipmp) + return ip_map_update(ipmp, udom, NEVER); + else return -ENOMEM; } @@ -295,7 +339,7 @@ int auth_unix_forget_old(struct 
auth_domain *dom) { struct unix_domain *udom; - if (dom->flavour != RPC_AUTH_UNIX) + if (dom->flavour != &svcauth_unix) return -EINVAL; udom = container_of(dom, struct unix_domain, h); udom->addr_changes++; @@ -310,7 +354,7 @@ struct auth_domain *auth_unix_lookup(struct in_addr addr) strcpy(key.m_class, "nfsd"); key.m_addr = addr; - ipm = ip_map_lookup(&key, 0); + ipm = ip_map_lookup("nfsd", addr); if (!ipm) return NULL; @@ -323,31 +367,28 @@ struct auth_domain *auth_unix_lookup(struct in_addr addr) rv = NULL; } else { rv = &ipm->m_client->h; - cache_get(&rv->h); + kref_get(&rv->ref); } - ip_map_put(&ipm->h, &ip_map_cache); + cache_put(&ipm->h, &ip_map_cache); return rv; } void svcauth_unix_purge(void) { cache_purge(&ip_map_cache); - cache_purge(&auth_domain_cache); } static int svcauth_unix_set_client(struct svc_rqst *rqstp) { - struct ip_map key, *ipm; + struct ip_map *ipm; rqstp->rq_client = NULL; if (rqstp->rq_proc == 0) return SVC_OK; - strcpy(key.m_class, rqstp->rq_server->sv_program->pg_class); - key.m_addr = rqstp->rq_addr.sin_addr; - - ipm = ip_map_lookup(&key, 0); + ipm = ip_map_lookup(rqstp->rq_server->sv_program->pg_class, + rqstp->rq_addr.sin_addr); if (ipm == NULL) return SVC_DENIED; @@ -361,8 +402,8 @@ svcauth_unix_set_client(struct svc_rqst *rqstp) return SVC_DENIED; case 0: rqstp->rq_client = &ipm->m_client->h; - cache_get(&rqstp->rq_client->h); - ip_map_put(&ipm->h, &ip_map_cache); + kref_get(&rqstp->rq_client->ref); + cache_put(&ipm->h, &ip_map_cache); break; } return SVC_OK; diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 8ff2c8acb22..4dd5b3cfe75 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -44,13 +44,13 @@ #include <linux/random.h> #include <linux/sunrpc/clnt.h> +#include <linux/sunrpc/metrics.h> /* * Local variables */ #ifdef RPC_DEBUG -# undef RPC_DEBUG_DATA # define RPCDBG_FACILITY RPCDBG_XPRT #endif @@ -548,6 +548,7 @@ void xprt_connect(struct rpc_task *task) task->tk_timeout = xprt->connect_timeout; rpc_sleep_on(&xprt->pending, task, xprt_connect_status, NULL); + xprt->stat.connect_start = jiffies; xprt->ops->connect(task); } return; @@ -558,6 +559,8 @@ static void xprt_connect_status(struct rpc_task *task) struct rpc_xprt *xprt = task->tk_xprt; if (task->tk_status >= 0) { + xprt->stat.connect_count++; + xprt->stat.connect_time += (long)jiffies - xprt->stat.connect_start; dprintk("RPC: %4d xprt_connect_status: connection established\n", task->tk_pid); return; @@ -601,16 +604,14 @@ static void xprt_connect_status(struct rpc_task *task) struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, u32 xid) { struct list_head *pos; - struct rpc_rqst *req = NULL; list_for_each(pos, &xprt->recv) { struct rpc_rqst *entry = list_entry(pos, struct rpc_rqst, rq_list); - if (entry->rq_xid == xid) { - req = entry; - break; - } + if (entry->rq_xid == xid) + return entry; } - return req; + xprt->stat.bad_xids++; + return NULL; } /** @@ -646,7 +647,12 @@ void xprt_complete_rqst(struct rpc_task *task, int copied) dprintk("RPC: %5u xid %08x complete (%d bytes received)\n", task->tk_pid, ntohl(req->rq_xid), copied); + task->tk_xprt->stat.recvs++; + task->tk_rtt = (long)jiffies - req->rq_xtime; + list_del_init(&req->rq_list); + /* Ensure all writes are done before we update req->rq_received */ + smp_wmb(); req->rq_received = req->rq_private_buf.len = copied; rpc_wake_up_task(task); } @@ -723,7 +729,6 @@ void xprt_transmit(struct rpc_task *task) dprintk("RPC: %4d xprt_transmit(%u)\n", task->tk_pid, req->rq_slen); - smp_rmb(); if (!req->rq_received) { if 
(list_empty(&req->rq_list)) { spin_lock_bh(&xprt->transport_lock); @@ -744,12 +749,19 @@ void xprt_transmit(struct rpc_task *task) if (status == 0) { dprintk("RPC: %4d xmit complete\n", task->tk_pid); spin_lock_bh(&xprt->transport_lock); + xprt->ops->set_retrans_timeout(task); + + xprt->stat.sends++; + xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs; + xprt->stat.bklog_u += xprt->backlog.qlen; + /* Don't race with disconnect */ if (!xprt_connected(xprt)) task->tk_status = -ENOTCONN; else if (!req->rq_received) rpc_sleep_on(&xprt->pending, task, NULL, xprt_timer); + xprt->ops->release_xprt(xprt, task); spin_unlock_bh(&xprt->transport_lock); return; @@ -848,6 +860,7 @@ void xprt_release(struct rpc_task *task) if (!(req = task->tk_rqstp)) return; + rpc_count_iostats(task); spin_lock_bh(&xprt->transport_lock); xprt->ops->release_xprt(xprt, task); if (xprt->ops->release_request) diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index c458f8d1d6d..4b4e7dfdff1 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -382,6 +382,7 @@ static int xs_tcp_send_request(struct rpc_task *task) /* If we've sent the entire packet, immediately * reset the count of bytes sent. */ req->rq_bytes_sent += status; + task->tk_bytes_sent += status; if (likely(req->rq_bytes_sent >= req->rq_slen)) { req->rq_bytes_sent = 0; return 0; @@ -1114,6 +1115,8 @@ static void xs_tcp_connect_worker(void *args) } /* Tell the socket layer to start connecting... */ + xprt->stat.connect_count++; + xprt->stat.connect_start = jiffies; status = sock->ops->connect(sock, (struct sockaddr *) &xprt->addr, sizeof(xprt->addr), O_NONBLOCK); dprintk("RPC: %p connect status %d connected %d sock state %d\n", @@ -1177,6 +1180,50 @@ static void xs_connect(struct rpc_task *task) } } +/** + * xs_udp_print_stats - display UDP socket-specifc stats + * @xprt: rpc_xprt struct containing statistics + * @seq: output file + * + */ +static void xs_udp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq) +{ + seq_printf(seq, "\txprt:\tudp %u %lu %lu %lu %lu %Lu %Lu\n", + xprt->port, + xprt->stat.bind_count, + xprt->stat.sends, + xprt->stat.recvs, + xprt->stat.bad_xids, + xprt->stat.req_u, + xprt->stat.bklog_u); +} + +/** + * xs_tcp_print_stats - display TCP socket-specifc stats + * @xprt: rpc_xprt struct containing statistics + * @seq: output file + * + */ +static void xs_tcp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq) +{ + long idle_time = 0; + + if (xprt_connected(xprt)) + idle_time = (long)(jiffies - xprt->last_used) / HZ; + + seq_printf(seq, "\txprt:\ttcp %u %lu %lu %lu %ld %lu %lu %lu %Lu %Lu\n", + xprt->port, + xprt->stat.bind_count, + xprt->stat.connect_count, + xprt->stat.connect_time, + idle_time, + xprt->stat.sends, + xprt->stat.recvs, + xprt->stat.bad_xids, + xprt->stat.req_u, + xprt->stat.bklog_u); +} + static struct rpc_xprt_ops xs_udp_ops = { .set_buffer_size = xs_udp_set_buffer_size, .reserve_xprt = xprt_reserve_xprt_cong, @@ -1191,6 +1238,7 @@ static struct rpc_xprt_ops xs_udp_ops = { .release_request = xprt_release_rqst_cong, .close = xs_close, .destroy = xs_destroy, + .print_stats = xs_udp_print_stats, }; static struct rpc_xprt_ops xs_tcp_ops = { @@ -1204,6 +1252,7 @@ static struct rpc_xprt_ops xs_tcp_ops = { .set_retrans_timeout = xprt_set_retrans_timeout_def, .close = xs_close, .destroy = xs_destroy, + .print_stats = xs_tcp_print_stats, }; /** |
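
A closing note on the gss_krb5_seal.c / gss_krb5_wrap.c hunks above: previously each caller bumped ctx->seq_send with no locking, so two concurrent requests on the same GSS context could stamp identical sequence numbers into their tokens. The patch snapshots and advances the counter under the new krb5_seq_lock and uses the snapshot when building the token. A stripped-down sketch of that pattern, with a hypothetical helper name:

#include <linux/spinlock.h>
#include <linux/sunrpc/gss_krb5.h>	/* struct krb5_ctx */

/* The patch itself declares: spinlock_t krb5_seq_lock = SPIN_LOCK_UNLOCKED; */
static DEFINE_SPINLOCK(seq_lock);

/* Hypothetical helper: atomically snapshot-and-advance the send sequence
 * number so the value written into the MIC/wrap token is unique per caller. */
static u32 krb5_next_seq_send(struct krb5_ctx *ctx)
{
	u32 seq_send;

	spin_lock(&seq_lock);
	seq_send = ctx->seq_send++;
	spin_unlock(&seq_lock);

	return seq_send;
}
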