path: root/fs/fuse/dev.c
author		Miklos Szeredi <mszeredi@suse.cz>	2007-10-16 23:31:04 -0700
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-17 08:43:04 -0700
commit		a131de0a482ac95e6469f56981c7b063593fdc5d (patch)
tree		0799985119f15525cc8c3c93a4cacad307405a5a /fs/fuse/dev.c
parent		819c4b3b4009275caae973691378235c177a1429 (diff)
fuse: no abort on interrupt
Don't set the 'aborted' flag on a request if it's interrupted.  We have to
wait for the answer anyway, and aborting would only save a little time while
copying the reply.

This means that write() on the fuse device will not return -ENOENT during
normal operation, only if the filesystem is aborted by a forced umount or
through the fusectl interface.

This could simplify userspace code somewhat when backward compatibility with
earlier kernel versions is not required.

Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
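To see what this buys userspace, here is a minimal, hypothetical sketch of a
daemon writing a reply to /dev/fuse.  The names send_reply() and fuse_fd are
illustrative, struct fuse_out_header is the ABI header from <linux/fuse.h>,
and the error-handling policy shown is an assumption, not part of this patch:

/*
 * Hypothetical sketch of writing a reply to /dev/fuse.  Writes to the
 * fuse device consume the whole reply or fail, so one writev() per
 * reply is enough.
 */
#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/uio.h>

#include <linux/fuse.h>		/* struct fuse_out_header */

static int send_reply(int fuse_fd, uint64_t unique, const void *arg, size_t argsize)
{
	struct fuse_out_header out = {
		.len    = sizeof(out) + argsize,
		.error  = 0,
		.unique = unique,
	};
	struct iovec iov[2] = {
		{ .iov_base = &out,        .iov_len = sizeof(out) },
		{ .iov_base = (void *)arg, .iov_len = argsize },
	};

	if (writev(fuse_fd, iov, argsize ? 2 : 1) >= 0)
		return 0;

	/*
	 * On kernels with this patch, ENOENT here no longer means "the
	 * request was interrupted"; it only shows up when the filesystem
	 * was aborted (forced umount or fusectl).  Older kernels can
	 * still return it for interrupted requests, so a portable daemon
	 * keeps silently ignoring it.
	 */
	if (errno == ENOENT)
		return 0;

	perror("writev(/dev/fuse)");
	return -errno;
}

A daemon that only supports kernels with this change could instead treat
ENOENT as the filesystem-aborted case, which is the simplification the commit
message refers to.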
Diffstat (limited to 'fs/fuse/dev.c')
-rw-r--r--	fs/fuse/dev.c	43
1 file changed, 24 insertions(+), 19 deletions(-)
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index c78f60e1ba5..cc6c2908fb1 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -273,28 +273,41 @@ static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
 			queue_interrupt(fc, req);
 	}
 
-	if (req->force) {
-		spin_unlock(&fc->lock);
-		wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
-		spin_lock(&fc->lock);
-	} else {
+	if (!req->force) {
 		sigset_t oldset;
 
 		/* Only fatal signals may interrupt this */
 		block_sigs(&oldset);
 		wait_answer_interruptible(fc, req);
 		restore_sigs(&oldset);
+
+		if (req->aborted)
+			goto aborted;
+		if (req->state == FUSE_REQ_FINISHED)
+			return;
+
+		/* Request is not yet in userspace, bail out */
+		if (req->state == FUSE_REQ_PENDING) {
+			list_del(&req->list);
+			__fuse_put_request(req);
+			req->out.h.error = -EINTR;
+			return;
+		}
 	}
 
-	if (req->aborted)
-		goto aborted;
-	if (req->state == FUSE_REQ_FINISHED)
-		return;
+	/*
+	 * Either request is already in userspace, or it was forced.
+	 * Wait it out.
+	 */
+	spin_unlock(&fc->lock);
+	wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
+	spin_lock(&fc->lock);
 
-	req->out.h.error = -EINTR;
-	req->aborted = 1;
+	if (!req->aborted)
+		return;
 
  aborted:
+	BUG_ON(req->state != FUSE_REQ_FINISHED);
 	if (req->locked) {
 		/* This is uninterruptible sleep, because data is
 		   being copied to/from the buffers of req.  During
@@ -305,14 +318,6 @@ static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
 		wait_event(req->waitq, !req->locked);
 		spin_lock(&fc->lock);
 	}
-	if (req->state == FUSE_REQ_PENDING) {
-		list_del(&req->list);
-		__fuse_put_request(req);
-	} else if (req->state == FUSE_REQ_SENT) {
-		spin_unlock(&fc->lock);
-		wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
-		spin_lock(&fc->lock);
-	}
 }
 
 static unsigned len_args(unsigned numargs, struct fuse_arg *args)