author	Chuck Lever <cel@netapp.com>	2006-06-20 12:55:19 -0400
committer	Trond Myklebust <Trond.Myklebust@netapp.com>	2006-06-24 13:11:38 -0400
commit	b1c5921c5b715c207d7fe77cd7aaafbb322f09f5 (patch)
tree	c8a56c56740efa728b7d3b935b6cb09330a4817f /fs/nfs
parent	816724e65c72a90a44fbad0ef0b59b186c85fa90 (diff)
NFS: Separate functions for counting outstanding NFS direct I/Os
Factor out the logic that increments and decrements the outstanding I/O count. This will be a commonly used bit of code in upcoming patches. Also make this an atomic_t again, since it will very often be manipulated outside the dreq->lock spin lock.

Signed-off-by: Chuck Lever <cel@netapp.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
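The pattern introduced here is the usual one-reference-per-outstanding-I/O scheme: every scheduled request bumps the count, every completion drops it, and whichever completion drops it to zero finishes the whole direct request. Below is a minimal user-space sketch of the same idea, using C11 <stdatomic.h> in place of the kernel's atomic_t; demo_req, demo_get and demo_put are illustrative names, not part of this patch.

#include <stdatomic.h>
#include <stdio.h>

/* Illustrative stand-in for the completion state in struct nfs_direct_req. */
struct demo_req {
	atomic_int io_count;		/* i/os we're waiting for */
};

/* Account for one more outstanding i/o (cf. get_dreq()). */
static inline void demo_get(struct demo_req *req)
{
	atomic_fetch_add(&req->io_count, 1);
}

/* Retire one i/o; nonzero means the last one just finished (cf. put_dreq()). */
static inline int demo_put(struct demo_req *req)
{
	return atomic_fetch_sub(&req->io_count, 1) == 1;
}

int main(void)
{
	struct demo_req req;
	int i;

	atomic_init(&req.io_count, 0);

	for (i = 0; i < 3; i++)		/* "schedule" three i/os */
		demo_get(&req);

	for (i = 0; i < 3; i++)		/* "complete" them; only the final put fires */
		if (demo_put(&req))
			printf("all i/o finished after completion %d\n", i + 1);

	return 0;
}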
Diffstat (limited to 'fs/nfs')
-rw-r--r--	fs/nfs/direct.c	39
1 file changed, 20 insertions, 19 deletions
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 402005c35ab..d78c61a41ec 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -80,8 +80,8 @@ struct nfs_direct_req {
 			unsigned int	npages;		/* count of pages */
 
 	/* completion state */
+	atomic_t		io_count;	/* i/os we're waiting for */
 	spinlock_t		lock;		/* protect completion state */
-	int			outstanding;	/* i/os we're waiting for */
 	ssize_t			count,		/* bytes actually processed */
 				error;		/* any reported error */
 	struct completion	completion;	/* wait for i/o completion */
@@ -97,6 +97,16 @@ struct nfs_direct_req {
 static void nfs_direct_write_schedule(struct nfs_direct_req *dreq, int sync);
 static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode);
 
+static inline void get_dreq(struct nfs_direct_req *dreq)
+{
+	atomic_inc(&dreq->io_count);
+}
+
+static inline int put_dreq(struct nfs_direct_req *dreq)
+{
+	return atomic_dec_and_test(&dreq->io_count);
+}
+
 /**
  * nfs_direct_IO - NFS address space operation for direct I/O
  * @rw: direction (read or write)
@@ -180,7 +190,7 @@ static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
 	dreq->iocb = NULL;
 	dreq->ctx = NULL;
 	spin_lock_init(&dreq->lock);
-	dreq->outstanding = 0;
+	atomic_set(&dreq->io_count, 0);
 	dreq->count = 0;
 	dreq->error = 0;
 	dreq->flags = 0;
@@ -278,7 +288,7 @@ static struct nfs_direct_req *nfs_direct_read_alloc(size_t nbytes, size_t rsize)
 		list_add(&data->pages, list);
 
 		data->req = (struct nfs_page *) dreq;
-		dreq->outstanding++;
+		get_dreq(dreq);
 		if (nbytes <= rsize)
 			break;
 		nbytes -= rsize;
@@ -302,13 +312,10 @@ static void nfs_direct_read_result(struct rpc_task *task, void *calldata)
 	else
 		dreq->error = task->tk_status;
 
-	if (--dreq->outstanding) {
-		spin_unlock(&dreq->lock);
-		return;
-	}
-
 	spin_unlock(&dreq->lock);
-	nfs_direct_complete(dreq);
+
+	if (put_dreq(dreq))
+		nfs_direct_complete(dreq);
 }
 
 static const struct rpc_call_ops nfs_read_direct_ops = {
@@ -432,7 +439,7 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
 
 	list_splice_init(&dreq->rewrite_list, &dreq->list);
 	list_for_each(pos, &dreq->list)
-		dreq->outstanding++;
+		get_dreq(dreq);
 	dreq->count = 0;
 
 	nfs_direct_write_schedule(dreq, FLUSH_STABLE);
@@ -564,7 +571,7 @@ static struct nfs_direct_req *nfs_direct_write_alloc(size_t nbytes, size_t wsize)
 		list_add(&data->pages, list);
 
 		data->req = (struct nfs_page *) dreq;
-		dreq->outstanding++;
+		get_dreq(dreq);
 		if (nbytes <= wsize)
 			break;
 		nbytes -= wsize;
@@ -620,14 +627,8 @@ static void nfs_direct_write_release(void *calldata)
 	struct nfs_write_data *data = calldata;
 	struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;
 
-	spin_lock(&dreq->lock);
-	if (--dreq->outstanding) {
-		spin_unlock(&dreq->lock);
-		return;
-	}
-	spin_unlock(&dreq->lock);
-
-	nfs_direct_write_complete(dreq, data->inode);
+	if (put_dreq(dreq))
+		nfs_direct_write_complete(dreq, data->inode);
 }
 
 static const struct rpc_call_ops nfs_write_direct_ops = {