From 440f0affe247e9990c8f8778f1861da4fd7d5e50 Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 19 Nov 2009 18:11:01 +0000 Subject: FS-Cache: Annotate slow-work runqueue proc lines for FS-Cache work items Annotate slow-work runqueue proc lines for FS-Cache work items. Objects include the object ID and the state. Operations include the object ID, the operation ID and the operation type and state. Signed-off-by: David Howells --- fs/fscache/page.c | 27 ++++++++++++++++++++++----- 1 file changed, 22 insertions(+), 5 deletions(-) (limited to 'fs/fscache/page.c') diff --git a/fs/fscache/page.c b/fs/fscache/page.c index 2568e0eb644..e8bbc395cef 100644 --- a/fs/fscache/page.c +++ b/fs/fscache/page.c @@ -63,14 +63,19 @@ static void fscache_end_page_write(struct fscache_cookie *cookie, struct page *p static void fscache_attr_changed_op(struct fscache_operation *op) { struct fscache_object *object = op->object; + int ret; _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id); fscache_stat(&fscache_n_attr_changed_calls); - if (fscache_object_is_active(object) && - object->cache->ops->attr_changed(object) < 0) - fscache_abort_object(object); + if (fscache_object_is_active(object)) { + fscache_set_op_state(op, "CallFS"); + ret = object->cache->ops->attr_changed(object); + fscache_set_op_state(op, "Done"); + if (ret < 0) + fscache_abort_object(object); + } _leave(""); } @@ -99,6 +104,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie) fscache_operation_init(op, NULL); fscache_operation_init_slow(op, fscache_attr_changed_op); op->flags = FSCACHE_OP_SLOW | (1 << FSCACHE_OP_EXCLUSIVE); + fscache_set_op_name(op, "Attr"); spin_lock(&cookie->lock); @@ -184,6 +190,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval( op->start_time = jiffies; INIT_WORK(&op->op.fast_work, fscache_retrieval_work); INIT_LIST_HEAD(&op->to_do); + fscache_set_op_name(&op->op, "Retr"); return op; } @@ -257,6 +264,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie, _leave(" = -ENOMEM"); return -ENOMEM; } + fscache_set_op_name(&op->op, "RetrRA1"); spin_lock(&cookie->lock); @@ -369,6 +377,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie, op = fscache_alloc_retrieval(mapping, end_io_func, context); if (!op) return -ENOMEM; + fscache_set_op_name(&op->op, "RetrRAN"); spin_lock(&cookie->lock); @@ -461,6 +470,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie, op = fscache_alloc_retrieval(page->mapping, NULL, NULL); if (!op) return -ENOMEM; + fscache_set_op_name(&op->op, "RetrAL1"); spin_lock(&cookie->lock); @@ -529,6 +539,8 @@ static void fscache_write_op(struct fscache_operation *_op) _enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage)); + fscache_set_op_state(&op->op, "GetPage"); + spin_lock(&cookie->lock); spin_lock(&object->lock); @@ -559,13 +571,17 @@ static void fscache_write_op(struct fscache_operation *_op) spin_unlock(&cookie->lock); if (page) { + fscache_set_op_state(&op->op, "Store"); ret = object->cache->ops->write_page(op, page); + fscache_set_op_state(&op->op, "EndWrite"); fscache_end_page_write(cookie, page); page_cache_release(page); - if (ret < 0) + if (ret < 0) { + fscache_set_op_state(&op->op, "Abort"); fscache_abort_object(object); - else + } else { fscache_enqueue_operation(&op->op); + } } _leave(""); @@ -634,6 +650,7 @@ int __fscache_write_page(struct fscache_cookie *cookie, fscache_operation_init(&op->op, fscache_release_write_op); fscache_operation_init_slow(&op->op, fscache_write_op); op->op.flags = FSCACHE_OP_SLOW | (1 << 
FSCACHE_OP_WAITING); + fscache_set_op_name(&op->op, "Write1"); ret = radix_tree_preload(gfp & ~__GFP_HIGHMEM); if (ret < 0) -- cgit v1.2.3 From 4fbf4291aa15926cd4fdca0ffe9122e89d0459db Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 19 Nov 2009 18:11:04 +0000 Subject: FS-Cache: Allow the current state of all objects to be dumped Allow the current state of all fscache objects to be dumped by doing:

	cat /proc/fs/fscache/objects

By default, all objects and all fields will be shown. This can be restricted by adding a suitable key to one of the caller's keyrings (such as the session keyring):

	keyctl add user fscache:objlist "<restrictions>" @s

The <restrictions> are:

	K	Show hexdump of object key (don't show if not given)
	A	Show hexdump of object aux data (don't show if not given)

And paired restrictions:

	C	Show objects that have a cookie
	c	Show objects that don't have a cookie
	B	Show objects that are busy
	b	Show objects that aren't busy
	W	Show objects that have pending writes
	w	Show objects that don't have pending writes
	R	Show objects that have outstanding reads
	r	Show objects that don't have outstanding reads
	S	Show objects that have slow work queued
	s	Show objects that don't have slow work queued

If neither side of a restriction pair is given, then both are implied. For example:

	keyctl add user fscache:objlist KB @s

shows objects that are busy, and lists their object keys, but does not dump their auxiliary data. It also implies "CcWwRrSs", but as 'B' is given, 'b' is not implied. Signed-off-by: David Howells --- fs/fscache/page.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'fs/fscache/page.c') diff --git a/fs/fscache/page.c b/fs/fscache/page.c index e8bbc395cef..c5973e38ce3 100644 --- a/fs/fscache/page.c +++ b/fs/fscache/page.c @@ -275,6 +275,9 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie, ASSERTCMP(object->state, >, FSCACHE_OBJECT_LOOKING_UP); + atomic_inc(&object->n_reads); + set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags); + if (fscache_submit_op(object, &op->op) < 0) goto nobufs_unlock; spin_unlock(&cookie->lock); @@ -386,6 +389,9 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie, object = hlist_entry(cookie->backing_objects.first, struct fscache_object, cookie_link); + atomic_inc(&object->n_reads); + set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags); + if (fscache_submit_op(object, &op->op) < 0) goto nobufs_unlock; spin_unlock(&cookie->lock); -- cgit v1.2.3
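For illustration only (not part of the patch above): a minimal user-space sketch that installs such a restriction key using libkeyutils' add_key() wrapper before the object list is read. The "KB" payload mirrors the example in the commit message; error handling is kept deliberately small.

	#include <stdio.h>
	#include <keyutils.h>

	int main(void)
	{
		/* Restrict the dump to busy objects ('B') and request object
		 * key hexdumps ('K'); the unpaired restrictions remain
		 * implied. */
		key_serial_t key = add_key("user", "fscache:objlist", "KB", 2,
					   KEY_SPEC_SESSION_KEYRING);
		if (key == -1) {
			perror("add_key");
			return 1;
		}

		/* The restricted listing can now be read, e.g.:
		 *	cat /proc/fs/fscache/objects */
		return 0;
	}

Compile with -lkeyutils. The key lives in the session keyring, so the subsequent read of /proc/fs/fscache/objects must happen in the same session.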
From 52bd75fdb135d6133d878ae60c6e7e3f4ebc1cfc Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 19 Nov 2009 18:11:08 +0000 Subject: FS-Cache: Add counters for entry/exit to/from cache operation functions Count entries to and exits from cache operation table functions. Maintain these as a single counter that's added to or removed from as appropriate. Signed-off-by: David Howells --- fs/fscache/page.c | 29 +++++++++++++++++++++++------ 1 file changed, 23 insertions(+), 6 deletions(-) (limited to 'fs/fscache/page.c') diff --git a/fs/fscache/page.c b/fs/fscache/page.c index c5973e38ce3..250dfd34c07 100644 --- a/fs/fscache/page.c +++ b/fs/fscache/page.c @@ -71,7 +71,9 @@ static void fscache_attr_changed_op(struct fscache_operation *op) if (fscache_object_is_active(object)) { fscache_set_op_state(op, "CallFS"); + fscache_stat(&fscache_n_cop_attr_changed); ret = object->cache->ops->attr_changed(object); + fscache_stat_d(&fscache_n_cop_attr_changed); fscache_set_op_state(op, "Done"); if (ret < 0) fscache_abort_object(object); @@ -300,11 +302,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie, /* ask the cache to honour the operation */ if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) { + fscache_stat(&fscache_n_cop_allocate_page); ret = object->cache->ops->allocate_page(op, page, gfp); + fscache_stat_d(&fscache_n_cop_allocate_page); if (ret == 0) ret = -ENODATA; } else { + fscache_stat(&fscache_n_cop_read_or_alloc_page); ret = object->cache->ops->read_or_alloc_page(op, page, gfp); + fscache_stat_d(&fscache_n_cop_read_or_alloc_page); } if (ret == -ENOMEM) @@ -358,7 +364,6 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie, void *context, gfp_t gfp) { - fscache_pages_retrieval_func_t func; struct fscache_retrieval *op; struct fscache_object *object; int ret; @@ -413,11 +418,17 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie, } /* ask the cache to honour the operation */ - if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) - func = object->cache->ops->allocate_pages; - else - func = object->cache->ops->read_or_alloc_pages; - ret = func(op, pages, nr_pages, gfp); + if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) { + fscache_stat(&fscache_n_cop_allocate_pages); + ret = object->cache->ops->allocate_pages( + op, pages, nr_pages, gfp); + fscache_stat_d(&fscache_n_cop_allocate_pages); + } else { + fscache_stat(&fscache_n_cop_read_or_alloc_pages); + ret = object->cache->ops->read_or_alloc_pages( + op, pages, nr_pages, gfp); + fscache_stat_d(&fscache_n_cop_read_or_alloc_pages); + } if (ret == -ENOMEM) fscache_stat(&fscache_n_retrievals_nomem); @@ -500,7 +511,9 @@ int __fscache_alloc_page(struct fscache_cookie *cookie, } /* ask the cache to honour the operation */ + fscache_stat(&fscache_n_cop_allocate_page); ret = object->cache->ops->allocate_page(op, page, gfp); + fscache_stat_d(&fscache_n_cop_allocate_page); if (ret < 0) fscache_stat(&fscache_n_allocs_nobufs); @@ -578,7 +591,9 @@ static void fscache_write_op(struct fscache_operation *_op) if (page) { fscache_set_op_state(&op->op, "Store"); + fscache_stat(&fscache_n_cop_write_page); ret = object->cache->ops->write_page(op, page); + fscache_stat_d(&fscache_n_cop_write_page); fscache_set_op_state(&op->op, "EndWrite"); fscache_end_page_write(cookie, page); page_cache_release(page); @@ -786,7 +801,9 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page) if (TestClearPageFsCache(page) && object->cache->ops->uncache_page) { /* the cache backend releases the cookie lock */ + fscache_stat(&fscache_n_cop_uncache_page); object->cache->ops->uncache_page(object, page); + fscache_stat_d(&fscache_n_cop_uncache_page); goto done; } -- cgit v1.2.3 From 5753c441889253e4323eee85f791a1d64cf08196 Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 19 Nov 2009 18:11:19 +0000 Subject: FS-Cache:
Permit cache retrieval ops to be interrupted in the initial wait phase Permit the operations to retrieve data from the cache or to allocate space in the cache for future writes to be interrupted whilst they're waiting for permission for the operation to proceed. Typically this wait occurs whilst the cache object is being looked up on disk in the background. If an interruption occurs, and the operation has not yet been given the go-ahead to run, the operation is dequeued and cancelled, and control returns to the read operation of the netfs routine with none of the requested pages having been read or in any way marked as known by the cache. This means that the initial wait is done interruptibly rather than uninterruptibly. In addition, extra stats values are made available to show the number of ops cancelled and the number of cache space allocations interrupted. Signed-off-by: David Howells --- fs/fscache/page.c | 55 ++++++++++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 48 insertions(+), 7 deletions(-) (limited to 'fs/fscache/page.c') diff --git a/fs/fscache/page.c b/fs/fscache/page.c index 250dfd34c07..e6f2e61133a 100644 --- a/fs/fscache/page.c +++ b/fs/fscache/page.c @@ -295,8 +295,20 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie, if (test_bit(FSCACHE_OP_WAITING, &op->op.flags)) { _debug(">>> WT"); fscache_stat(&fscache_n_retrieval_op_waits); - wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING, - fscache_wait_bit, TASK_UNINTERRUPTIBLE); + if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING, + fscache_wait_bit_interruptible, + TASK_INTERRUPTIBLE) < 0) { + ret = fscache_cancel_op(&op->op); + if (ret == 0) { + ret = -ERESTARTSYS; + goto error; + } + + /* it's been removed from the pending queue by another + * party, so we should get to run shortly */ + wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING, + fscache_wait_bit, TASK_UNINTERRUPTIBLE); + } _debug("<<< GO"); } @@ -313,6 +325,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie, fscache_stat_d(&fscache_n_cop_read_or_alloc_page); } +error: if (ret == -ENOMEM) fscache_stat(&fscache_n_retrievals_nomem); else if (ret == -ERESTARTSYS) @@ -412,8 +425,20 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie, if (test_bit(FSCACHE_OP_WAITING, &op->op.flags)) { _debug(">>> WT"); fscache_stat(&fscache_n_retrieval_op_waits); - wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING, - fscache_wait_bit, TASK_UNINTERRUPTIBLE); + if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING, + fscache_wait_bit_interruptible, + TASK_INTERRUPTIBLE) < 0) { + ret = fscache_cancel_op(&op->op); + if (ret == 0) { + ret = -ERESTARTSYS; + goto error; + } + + /* it's been removed from the pending queue by another + * party, so we should get to run shortly */ + wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING, + fscache_wait_bit, TASK_UNINTERRUPTIBLE); + } _debug("<<< GO"); } @@ -430,6 +455,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie, fscache_stat_d(&fscache_n_cop_read_or_alloc_pages); } +error: if (ret == -ENOMEM) fscache_stat(&fscache_n_retrievals_nomem); else if (ret == -ERESTARTSYS) @@ -505,8 +531,20 @@ int __fscache_alloc_page(struct fscache_cookie *cookie, if (test_bit(FSCACHE_OP_WAITING, &op->op.flags)) { _debug(">>> WT"); fscache_stat(&fscache_n_alloc_op_waits); - wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING, - fscache_wait_bit, TASK_UNINTERRUPTIBLE); + if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING, + fscache_wait_bit_interruptible, + TASK_INTERRUPTIBLE) < 0) { + ret = 
fscache_cancel_op(&op->op); + if (ret == 0) { + ret = -ERESTARTSYS; + goto error; + } + + /* it's been removed from the pending queue by another + * party, so we should get to run shortly */ + wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING, + fscache_wait_bit, TASK_UNINTERRUPTIBLE); + } _debug("<<< GO"); } @@ -515,7 +553,10 @@ int __fscache_alloc_page(struct fscache_cookie *cookie, ret = object->cache->ops->allocate_page(op, page, gfp); fscache_stat_d(&fscache_n_cop_allocate_page); - if (ret < 0) +error: + if (ret == -ERESTARTSYS) + fscache_stat(&fscache_n_allocs_intr); + else if (ret < 0) fscache_stat(&fscache_n_allocs_nobufs); else fscache_stat(&fscache_n_allocs_ok); -- cgit v1.2.3 From 1bccf513ac49d44604ba1cddcc29f5886e70f1b6 Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 19 Nov 2009 18:11:25 +0000 Subject: FS-Cache: Fix lock misorder in fscache_write_op() FS-Cache has two structs internally for keeping track of the internal state of a cached file: the fscache_cookie struct, which represents the netfs's state, and fscache_object struct, which represents the cache's state. Each has a pointer that points to the other (when both are in existence), and each has a spinlock for pointer maintenance. Since netfs operations approach these structures from the cookie side, they get the cookie lock first, then the object lock. Cache operations, on the other hand, approach from the object side, and get the object lock first. It is not then permitted for a cache operation to get the cookie lock whilst it is holding the object lock lest deadlock occur; instead, it must do one of two things: (1) increment the cookie usage counter, drop the object lock and then get both locks in order, or (2) simply hold the object lock as certain parts of the cookie may not be altered whilst the object lock is held. It is also not permitted to follow either pointer without holding the lock at the end you start with. To break the pointers between the cookie and the object, both locks must be held. fscache_write_op(), however, violates the locking rules: It attempts to get the cookie lock without (a) checking that the cookie pointer is a valid pointer, and (b) holding the object lock to protect the cookie pointer whilst it follows it. This is so that it can access the pending page store tree without interference from __fscache_write_page(). This is fixed by splitting the cookie lock, such that the page store tracking tree is protected by its own lock, and checking that the cookie pointer is non-NULL before we attempt to follow it whilst holding the object lock. The new lock is subordinate to both the cookie lock and the object lock, and so should be taken after those. 
Signed-off-by: David Howells --- fs/fscache/page.c | 52 +++++++++++++++++++++++++++++++++++----------------- 1 file changed, 35 insertions(+), 17 deletions(-) (limited to 'fs/fscache/page.c') diff --git a/fs/fscache/page.c b/fs/fscache/page.c index e6f2e61133a..3ea8897bc21 100644 --- a/fs/fscache/page.c +++ b/fs/fscache/page.c @@ -45,16 +45,26 @@ EXPORT_SYMBOL(__fscache_wait_on_page_write); /* * note that a page has finished being written to the cache */ -static void fscache_end_page_write(struct fscache_cookie *cookie, struct page *page) +static void fscache_end_page_write(struct fscache_object *object, + struct page *page) { - struct page *xpage; + struct fscache_cookie *cookie; + struct page *xpage = NULL; - spin_lock(&cookie->lock); - xpage = radix_tree_delete(&cookie->stores, page->index); - spin_unlock(&cookie->lock); - ASSERT(xpage != NULL); - - wake_up_bit(&cookie->flags, 0); + spin_lock(&object->lock); + cookie = object->cookie; + if (cookie) { + /* delete the page from the tree if it is now no longer + * pending */ + spin_lock(&cookie->stores_lock); + fscache_stat(&fscache_n_store_radix_deletes); + xpage = radix_tree_delete(&cookie->stores, page->index); + spin_unlock(&cookie->stores_lock); + wake_up_bit(&cookie->flags, 0); + } + spin_unlock(&object->lock); + if (xpage) + page_cache_release(xpage); } /* @@ -591,7 +601,7 @@ static void fscache_write_op(struct fscache_operation *_op) struct fscache_storage *op = container_of(_op, struct fscache_storage, op); struct fscache_object *object = op->op.object; - struct fscache_cookie *cookie = object->cookie; + struct fscache_cookie *cookie; struct page *page; unsigned n; void *results[1]; @@ -601,16 +611,17 @@ static void fscache_write_op(struct fscache_operation *_op) fscache_set_op_state(&op->op, "GetPage"); - spin_lock(&cookie->lock); spin_lock(&object->lock); + cookie = object->cookie; - if (!fscache_object_is_active(object)) { + if (!fscache_object_is_active(object) || !cookie) { spin_unlock(&object->lock); - spin_unlock(&cookie->lock); _leave(""); return; } + spin_lock(&cookie->stores_lock); + fscache_stat(&fscache_n_store_calls); /* find a page to store */ @@ -621,23 +632,25 @@ static void fscache_write_op(struct fscache_operation *_op) goto superseded; page = results[0]; _debug("gang %d [%lx]", n, page->index); - if (page->index > op->store_limit) + if (page->index > op->store_limit) { + fscache_stat(&fscache_n_store_pages_over_limit); goto superseded; + } radix_tree_tag_clear(&cookie->stores, page->index, FSCACHE_COOKIE_PENDING_TAG); + spin_unlock(&cookie->stores_lock); spin_unlock(&object->lock); - spin_unlock(&cookie->lock); if (page) { fscache_set_op_state(&op->op, "Store"); + fscache_stat(&fscache_n_store_pages); fscache_stat(&fscache_n_cop_write_page); ret = object->cache->ops->write_page(op, page); fscache_stat_d(&fscache_n_cop_write_page); fscache_set_op_state(&op->op, "EndWrite"); - fscache_end_page_write(cookie, page); - page_cache_release(page); + fscache_end_page_write(object, page); if (ret < 0) { fscache_set_op_state(&op->op, "Abort"); fscache_abort_object(object); @@ -653,9 +666,9 @@ superseded: /* this writer is going away and there aren't any more things to * write */ _debug("cease"); + spin_unlock(&cookie->stores_lock); clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags); spin_unlock(&object->lock); - spin_unlock(&cookie->lock); _leave(""); } @@ -731,6 +744,7 @@ int __fscache_write_page(struct fscache_cookie *cookie, /* add the page to the pending-storage radix tree on the backing * object */ 
spin_lock(&object->lock); + spin_lock(&cookie->stores_lock); _debug("store limit %llx", (unsigned long long) object->store_limit); @@ -751,6 +765,7 @@ int __fscache_write_page(struct fscache_cookie *cookie, if (test_and_set_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags)) goto already_pending; + spin_unlock(&cookie->stores_lock); spin_unlock(&object->lock); op->op.debug_id = atomic_inc_return(&fscache_op_debug_id); @@ -772,6 +787,7 @@ int __fscache_write_page(struct fscache_cookie *cookie, already_queued: fscache_stat(&fscache_n_stores_again); already_pending: + spin_unlock(&cookie->stores_lock); spin_unlock(&object->lock); spin_unlock(&cookie->lock); radix_tree_preload_end(); @@ -781,7 +797,9 @@ already_pending: return 0; submit_failed: + spin_lock(&cookie->stores_lock); radix_tree_delete(&cookie->stores, page->index); + spin_unlock(&cookie->stores_lock); page_cache_release(page); ret = -ENOBUFS; goto nobufs; -- cgit v1.2.3 From 285e728b0ac55b53a673114096168d6f74930167 Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 19 Nov 2009 18:11:29 +0000 Subject: FS-Cache: Don't delete pending pages from the page-store tracking tree Don't delete pending pages from the page-store tracking tree, but rather send them for another write as they've presumably been updated. Signed-off-by: David Howells --- fs/fscache/page.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'fs/fscache/page.c') diff --git a/fs/fscache/page.c b/fs/fscache/page.c index 3ea8897bc21..022a5da8e13 100644 --- a/fs/fscache/page.c +++ b/fs/fscache/page.c @@ -57,8 +57,11 @@ static void fscache_end_page_write(struct fscache_object *object, /* delete the page from the tree if it is now no longer * pending */ spin_lock(&cookie->stores_lock); - fscache_stat(&fscache_n_store_radix_deletes); - xpage = radix_tree_delete(&cookie->stores, page->index); + if (!radix_tree_tag_get(&cookie->stores, page->index, + FSCACHE_COOKIE_PENDING_TAG)) { + fscache_stat(&fscache_n_store_radix_deletes); + xpage = radix_tree_delete(&cookie->stores, page->index); + } spin_unlock(&cookie->stores_lock); wake_up_bit(&cookie->flags, 0); } -- cgit v1.2.3 From 201a15428bd54f83eccec8b7c64a04b8f9431204 Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 19 Nov 2009 18:11:35 +0000 Subject: FS-Cache: Handle pages pending storage that get evicted under OOM conditions Handle netfs pages that the vmscan algorithm wants to evict from the pagecache under OOM conditions, but that are waiting for write to the cache. Under these conditions, vmscan calls the releasepage() function of the netfs, asking if a page can be discarded. The problem is typified by the following trace of a stuck process: kslowd005 D 0000000000000000 0 4253 2 0x00000080 ffff88001b14f370 0000000000000046 ffff880020d0d000 0000000000000007 0000000000000006 0000000000000001 ffff88001b14ffd8 ffff880020d0d2a8 000000000000ddf0 00000000000118c0 00000000000118c0 ffff880020d0d2a8 Call Trace: [] __fscache_wait_on_page_write+0x8b/0xa7 [fscache] [] ? autoremove_wake_function+0x0/0x34 [] ? __fscache_check_page_write+0x63/0x70 [fscache] [] nfs_fscache_release_page+0x4e/0xc4 [nfs] [] nfs_release_page+0x3c/0x41 [nfs] [] try_to_release_page+0x32/0x3b [] shrink_page_list+0x316/0x4ac [] shrink_inactive_list+0x392/0x67c [] ? __mutex_unlock_slowpath+0x100/0x10b [] ? trace_hardirqs_on_caller+0x10c/0x130 [] ? mutex_unlock+0x9/0xb [] shrink_list+0x8d/0x8f [] shrink_zone+0x278/0x33c [] ? ktime_get_ts+0xad/0xba [] try_to_free_pages+0x22e/0x392 [] ? 
isolate_pages_global+0x0/0x212 [] __alloc_pages_nodemask+0x3dc/0x5cf [] grab_cache_page_write_begin+0x65/0xaa [] ext3_write_begin+0x78/0x1eb [] generic_file_buffered_write+0x109/0x28c [] ? current_fs_time+0x22/0x29 [] __generic_file_aio_write+0x350/0x385 [] ? generic_file_aio_write+0x4a/0xae [] generic_file_aio_write+0x60/0xae [] do_sync_write+0xe3/0x120 [] ? autoremove_wake_function+0x0/0x34 [] ? __dentry_open+0x1a5/0x2b8 [] ? dentry_open+0x82/0x89 [] cachefiles_write_page+0x298/0x335 [cachefiles] [] fscache_write_op+0x178/0x2c2 [fscache] [] fscache_op_execute+0x7a/0xd1 [fscache] [] slow_work_execute+0x18f/0x2d1 [] slow_work_thread+0x1c5/0x308 [] ? autoremove_wake_function+0x0/0x34 [] ? slow_work_thread+0x0/0x308 [] kthread+0x7a/0x82 [] child_rip+0xa/0x20 [] ? restore_args+0x0/0x30 [] ? tg_shares_up+0x171/0x227 [] ? kthread+0x0/0x82 [] ? child_rip+0x0/0x20 In the above backtrace, the following is happening: (1) A page storage operation is being executed by a slow-work thread (fscache_write_op()). (2) FS-Cache farms the operation out to the cache to perform (cachefiles_write_page()). (3) CacheFiles is then calling Ext3 to perform the actual write, using Ext3's standard write (do_sync_write()) under KERNEL_DS directly from the netfs page. (4) However, for Ext3 to perform the write, it must allocate some memory, in particular, it must allocate at least one page cache page into which it can copy the data from the netfs page. (5) Under OOM conditions, the memory allocator can't immediately come up with a page, so it uses vmscan to find something to discard (try_to_free_pages()). (6) vmscan finds a clean netfs page it might be able to discard (possibly the one it's trying to write out). (7) The netfs is called to throw the page away (nfs_release_page()) - but it's called with __GFP_WAIT, so the netfs decides to wait for the store to complete (__fscache_wait_on_page_write()). (8) This blocks a slow-work processing thread - possibly against itself. The system ends up stuck because it can't write out any netfs pages to the cache without allocating more memory. To avoid this, we make FS-Cache cancel some writes that aren't in the middle of actually being performed. This means that some data won't make it into the cache this time. To support this, a new FS-Cache function is added fscache_maybe_release_page() that replaces what the netfs releasepage() functions used to do with respect to the cache. The decisions fscache_maybe_release_page() makes are counted and displayed through /proc/fs/fscache/stats on a line labelled "VmScan". There are four counters provided: "nos=N" - pages that weren't pending storage; "gon=N" - pages that were pending storage when we first looked, but weren't by the time we got the object lock; "bsy=N" - pages that we ignored as they were actively being written when we looked; and "can=N" - pages that we cancelled the storage of. What I'd really like to do is alter the behaviour of the cancellation heuristics, depending on how necessary it is to expel pages. If there are plenty of other pages that aren't waiting to be written to the cache that could be ejected first, then it would be nice to hold up on immediate cancellation of cache writes - but I don't see a way of doing that. 
Signed-off-by: David Howells --- fs/fscache/page.c | 79 +++++++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 77 insertions(+), 2 deletions(-) (limited to 'fs/fscache/page.c') diff --git a/fs/fscache/page.c b/fs/fscache/page.c index 022a5da8e13..fc76798bd96 100644 --- a/fs/fscache/page.c +++ b/fs/fscache/page.c @@ -42,6 +42,75 @@ void __fscache_wait_on_page_write(struct fscache_cookie *cookie, struct page *pa } EXPORT_SYMBOL(__fscache_wait_on_page_write); +/* + * decide whether a page can be released, possibly by cancelling a store to it + * - we're allowed to sleep if __GFP_WAIT is flagged + */ +bool __fscache_maybe_release_page(struct fscache_cookie *cookie, + struct page *page, + gfp_t gfp) +{ + struct page *xpage; + void *val; + + _enter("%p,%p,%x", cookie, page, gfp); + + rcu_read_lock(); + val = radix_tree_lookup(&cookie->stores, page->index); + if (!val) { + rcu_read_unlock(); + fscache_stat(&fscache_n_store_vmscan_not_storing); + __fscache_uncache_page(cookie, page); + return true; + } + + /* see if the page is actually undergoing storage - if so we can't get + * rid of it till the cache has finished with it */ + if (radix_tree_tag_get(&cookie->stores, page->index, + FSCACHE_COOKIE_STORING_TAG)) { + rcu_read_unlock(); + goto page_busy; + } + + /* the page is pending storage, so we attempt to cancel the store and + * discard the store request so that the page can be reclaimed */ + spin_lock(&cookie->stores_lock); + rcu_read_unlock(); + + if (radix_tree_tag_get(&cookie->stores, page->index, + FSCACHE_COOKIE_STORING_TAG)) { + /* the page started to undergo storage whilst we were looking, + * so now we can only wait or return */ + spin_unlock(&cookie->stores_lock); + goto page_busy; + } + + xpage = radix_tree_delete(&cookie->stores, page->index); + spin_unlock(&cookie->stores_lock); + + if (xpage) { + fscache_stat(&fscache_n_store_vmscan_cancelled); + fscache_stat(&fscache_n_store_radix_deletes); + ASSERTCMP(xpage, ==, page); + } else { + fscache_stat(&fscache_n_store_vmscan_gone); + } + + wake_up_bit(&cookie->flags, 0); + if (xpage) + page_cache_release(xpage); + __fscache_uncache_page(cookie, page); + return true; + +page_busy: + /* we might want to wait here, but that could deadlock the allocator as + * the slow-work threads writing to the cache may all end up sleeping + * on memory allocation */ + fscache_stat(&fscache_n_store_vmscan_busy); + return false; +} +EXPORT_SYMBOL(__fscache_maybe_release_page); + /* * note that a page has finished being written to the cache */ @@ -57,6 +126,8 @@ static void fscache_end_page_write(struct fscache_object *object, /* delete the page from the tree if it is now no longer * pending */ spin_lock(&cookie->stores_lock); + radix_tree_tag_clear(&cookie->stores, page->index, + FSCACHE_COOKIE_STORING_TAG); if (!radix_tree_tag_get(&cookie->stores, page->index, FSCACHE_COOKIE_PENDING_TAG)) { fscache_stat(&fscache_n_store_radix_deletes); @@ -640,8 +711,12 @@ static void fscache_write_op(struct fscache_operation *_op) goto superseded; } - radix_tree_tag_clear(&cookie->stores, page->index, - FSCACHE_COOKIE_PENDING_TAG); + if (page) { + radix_tree_tag_set(&cookie->stores, page->index, + FSCACHE_COOKIE_STORING_TAG); + radix_tree_tag_clear(&cookie->stores, page->index, + FSCACHE_COOKIE_PENDING_TAG); + } spin_unlock(&cookie->stores_lock); spin_unlock(&object->lock); -- cgit v1.2.3 From 60d543ca724be155c2b6166e36a00c80b21bd810 Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 19 Nov 2009 18:11:45 +0000 Subject: FS-Cache: Start processing 
an object's operations on that object's death Start processing an object's operations when that object moves into the DYING state as the object cannot be destroyed until all its outstanding operations have completed. Furthermore, make sure that read and allocation operations handle being woken up on a dead object. Such events are recorded in the Allocs.abt and Retrvls.abt statistics as viewable through /proc/fs/fscache/stats. The code for waiting for object activation for the read and allocation operations is also extracted into its own function as it is much the same in all cases, differing only in the stats incremented. Signed-off-by: David Howells --- fs/fscache/page.c | 112 +++++++++++++++++++++++++++--------------------------- 1 file changed, 55 insertions(+), 57 deletions(-) (limited to 'fs/fscache/page.c') diff --git a/fs/fscache/page.c b/fs/fscache/page.c index fc76798bd96..c598ea4c4e7 100644 --- a/fs/fscache/page.c +++ b/fs/fscache/page.c @@ -313,6 +313,43 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie) return 0; } +/* + * wait for an object to become active (or dead) + */ +static int fscache_wait_for_retrieval_activation(struct fscache_object *object, + struct fscache_retrieval *op, + atomic_t *stat_op_waits, + atomic_t *stat_object_dead) +{ + int ret; + + if (!test_bit(FSCACHE_OP_WAITING, &op->op.flags)) + goto check_if_dead; + + _debug(">>> WT"); + fscache_stat(stat_op_waits); + if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING, + fscache_wait_bit_interruptible, + TASK_INTERRUPTIBLE) < 0) { + ret = fscache_cancel_op(&op->op); + if (ret == 0) + return -ERESTARTSYS; + + /* it's been removed from the pending queue by another party, + * so we should get to run shortly */ + wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING, + fscache_wait_bit, TASK_UNINTERRUPTIBLE); + } + _debug("<<< GO"); + +check_if_dead: + if (unlikely(fscache_object_is_dead(object))) { + fscache_stat(stat_object_dead); + return -ENOBUFS; + } + return 0; +} + /* * read a page from the cache or allocate a block in which to store it * - we return: @@ -376,25 +413,12 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie, /* we wait for the operation to become active, and then process it * *here*, in this thread, and not in the thread pool */ - if (test_bit(FSCACHE_OP_WAITING, &op->op.flags)) { - _debug(">>> WT"); - fscache_stat(&fscache_n_retrieval_op_waits); - if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING, - fscache_wait_bit_interruptible, - TASK_INTERRUPTIBLE) < 0) { - ret = fscache_cancel_op(&op->op); - if (ret == 0) { - ret = -ERESTARTSYS; - goto error; - } - - /* it's been removed from the pending queue by another - * party, so we should get to run shortly */ - wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING, - fscache_wait_bit, TASK_UNINTERRUPTIBLE); - } - _debug("<<< GO"); - } + ret = fscache_wait_for_retrieval_activation( + object, op, + __fscache_stat(&fscache_n_retrieval_op_waits), + __fscache_stat(&fscache_n_retrievals_object_dead)); + if (ret < 0) + goto error; /* ask the cache to honour the operation */ if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) { @@ -506,25 +530,12 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie, /* we wait for the operation to become active, and then process it * *here*, in this thread, and not in the thread pool */ - if (test_bit(FSCACHE_OP_WAITING, &op->op.flags)) { - _debug(">>> WT"); - fscache_stat(&fscache_n_retrieval_op_waits); - if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING, - 
fscache_wait_bit_interruptible, - TASK_INTERRUPTIBLE) < 0) { - ret = fscache_cancel_op(&op->op); - if (ret == 0) { - ret = -ERESTARTSYS; - goto error; - } - - /* it's been removed from the pending queue by another - * party, so we should get to run shortly */ - wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING, - fscache_wait_bit, TASK_UNINTERRUPTIBLE); - } - _debug("<<< GO"); - } + ret = fscache_wait_for_retrieval_activation( + object, op, + __fscache_stat(&fscache_n_retrieval_op_waits), + __fscache_stat(&fscache_n_retrievals_object_dead)); + if (ret < 0) + goto error; /* ask the cache to honour the operation */ if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) { @@ -612,25 +623,12 @@ int __fscache_alloc_page(struct fscache_cookie *cookie, fscache_stat(&fscache_n_alloc_ops); - if (test_bit(FSCACHE_OP_WAITING, &op->op.flags)) { - _debug(">>> WT"); - fscache_stat(&fscache_n_alloc_op_waits); - if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING, - fscache_wait_bit_interruptible, - TASK_INTERRUPTIBLE) < 0) { - ret = fscache_cancel_op(&op->op); - if (ret == 0) { - ret = -ERESTARTSYS; - goto error; - } - - /* it's been removed from the pending queue by another - * party, so we should get to run shortly */ - wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING, - fscache_wait_bit, TASK_UNINTERRUPTIBLE); - } - _debug("<<< GO"); - } + ret = fscache_wait_for_retrieval_activation( + object, op, + __fscache_stat(&fscache_n_alloc_op_waits), + __fscache_stat(&fscache_n_allocs_object_dead)); + if (ret < 0) + goto error; /* ask the cache to honour the operation */ fscache_stat(&fscache_n_cop_allocate_page); -- cgit v1.2.3
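A closing illustration for the series (a sketch under stated assumptions, not code from any of the patches above): roughly how a netfs's releasepage() path might consume the fscache_maybe_release_page() helper introduced in commit 285e728b, in the style of the NFS caller visible in that commit's backtrace. Here my_netfs_cookie() is a hypothetical accessor for the inode's FS-Cache cookie, and my_netfs_release_page() stands in for the netfs's address_space releasepage operation.

	#include <linux/fscache.h>
	#include <linux/pagemap.h>

	/* Hypothetical: however the netfs maps an inode to its cookie. */
	static struct fscache_cookie *my_netfs_cookie(struct inode *inode);

	/*
	 * Sketch of a netfs ->releasepage() that lets FS-Cache decide
	 * whether a page pinned for writing to the cache may be discarded
	 * under memory pressure.
	 */
	static int my_netfs_release_page(struct page *page, gfp_t gfp)
	{
		if (PageFsCache(page)) {
			struct fscache_cookie *cookie =
				my_netfs_cookie(page->mapping->host);

			/* false: the store is actively in progress ("bsy" in
			 * the VmScan stats line) and the page cannot be
			 * released yet; true: a merely-pending store was
			 * cancelled ("can"), or there was nothing to cancel
			 * ("nos"/"gon"). */
			if (!fscache_maybe_release_page(cookie, page, gfp))
				return 0;
		}
		return 1;	/* the VM may release the page */
	}

The important property, per the OOM analysis above, is that this path never sleeps waiting for a slow-work thread to finish a store, so memory reclaim cannot deadlock against the cache writer.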