Diffstat (limited to 'fs/cachefiles')
-rw-r--r--	fs/cachefiles/interface.c |  32
-rw-r--r--	fs/cachefiles/namei.c     | 187
-rw-r--r--	fs/cachefiles/rdwr.c      | 130
3 files changed, 285 insertions(+), 64 deletions(-)
diff --git a/fs/cachefiles/interface.c b/fs/cachefiles/interface.c
index 431accd475a..27089311fbe 100644
--- a/fs/cachefiles/interface.c
+++ b/fs/cachefiles/interface.c
@@ -114,8 +114,9 @@ nomem_lookup_data:
/*
* attempt to look up the nominated node in this cache
+ * - return -ETIMEDOUT to be scheduled again
*/
-static void cachefiles_lookup_object(struct fscache_object *_object)
+static int cachefiles_lookup_object(struct fscache_object *_object)
{
struct cachefiles_lookup_data *lookup_data;
struct cachefiles_object *parent, *object;
@@ -145,13 +146,15 @@ static void cachefiles_lookup_object(struct fscache_object *_object)
object->fscache.cookie->def->type != FSCACHE_COOKIE_TYPE_INDEX)
cachefiles_attr_changed(&object->fscache);
- if (ret < 0) {
- printk(KERN_WARNING "CacheFiles: Lookup failed error %d\n",
- ret);
+ if (ret < 0 && ret != -ETIMEDOUT) {
+ if (ret != -ENOBUFS)
+ printk(KERN_WARNING
+ "CacheFiles: Lookup failed error %d\n", ret);
fscache_object_lookup_error(&object->fscache);
}
_leave(" [%d]", ret);
+ return ret;
}
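
With this change the lookup method reports back to the object state machine instead of returning void: -ETIMEDOUT means "requeue me and try again later", while hard failures have already been flagged through fscache_object_lookup_error(). A rough caller-side sketch follows; the requeue helper named here is an assumption about the engine, not part of this diff:

	ret = object->cache->ops->lookup_object(object);
	if (ret == -ETIMEDOUT) {
		/* the backing object is still busy; put this object back
		 * on the work queue rather than failing the lookup */
		fscache_enqueue_object(object);		/* assumed helper */
	}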
/*
@@ -331,6 +334,7 @@ static void cachefiles_put_object(struct fscache_object *_object)
}
cache = object->fscache.cache;
+ fscache_object_destroy(&object->fscache);
kmem_cache_free(cachefiles_object_jar, object);
fscache_object_destroyed(cache);
}
@@ -403,12 +407,26 @@ static int cachefiles_attr_changed(struct fscache_object *_object)
if (oi_size == ni_size)
return 0;
- newattrs.ia_size = ni_size;
- newattrs.ia_valid = ATTR_SIZE;
-
cachefiles_begin_secure(cache, &saved_cred);
mutex_lock(&object->backer->d_inode->i_mutex);
+
+ /* if there's an extension to a partial page at the end of the backing
+ * file, we need to discard the partial page so that we pick up new
+ * data after it */
+ if (oi_size & ~PAGE_MASK && ni_size > oi_size) {
+ _debug("discard tail %llx", oi_size);
+ newattrs.ia_valid = ATTR_SIZE;
+ newattrs.ia_size = oi_size & PAGE_MASK;
+ ret = notify_change(object->backer, &newattrs);
+ if (ret < 0)
+ goto truncate_failed;
+ }
+
+ newattrs.ia_valid = ATTR_SIZE;
+ newattrs.ia_size = ni_size;
ret = notify_change(object->backer, &newattrs);
+
+truncate_failed:
mutex_unlock(&object->backer->d_inode->i_mutex);
cachefiles_end_secure(cache, saved_cred);
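
The two-step resize above exists because growing a file across a partial tail page would otherwise leave stale data in the cached partial page. The same idea in a hypothetical userspace sketch, using ftruncate() in place of notify_change() and assuming 4K pages:

	#include <unistd.h>

	#define MY_PAGE_MASK	(~4095ULL)	/* hypothetical; 4K pages */

	static int resize_backing_file(int fd, unsigned long long oi_size,
				       unsigned long long ni_size)
	{
		/* growing across a partial tail page: cut back to the
		 * page boundary first so the stale partial page is
		 * discarded before the file is extended */
		if ((oi_size & ~MY_PAGE_MASK) && ni_size > oi_size &&
		    ftruncate(fd, oi_size & MY_PAGE_MASK) < 0)
			return -1;

		return ftruncate(fd, ni_size);	/* then set the new size */
	}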
diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
index 4ce818ae39e..14ac4806e29 100644
--- a/fs/cachefiles/namei.c
+++ b/fs/cachefiles/namei.c
@@ -21,17 +21,81 @@
#include <linux/security.h>
#include "internal.h"
-static int cachefiles_wait_bit(void *flags)
+#define CACHEFILES_KEYBUF_SIZE 512
+
+/*
+ * dump debugging info about an object
+ */
+static noinline
+void __cachefiles_printk_object(struct cachefiles_object *object,
+ const char *prefix,
+ u8 *keybuf)
{
- schedule();
- return 0;
+ struct fscache_cookie *cookie;
+ unsigned keylen, loop;
+
+ printk(KERN_ERR "%sobject: OBJ%x\n",
+ prefix, object->fscache.debug_id);
+ printk(KERN_ERR "%sobjstate=%s fl=%lx swfl=%lx ev=%lx[%lx]\n",
+ prefix, fscache_object_states[object->fscache.state],
+ object->fscache.flags, object->fscache.work.flags,
+ object->fscache.events,
+ object->fscache.event_mask & FSCACHE_OBJECT_EVENTS_MASK);
+ printk(KERN_ERR "%sops=%u inp=%u exc=%u\n",
+ prefix, object->fscache.n_ops, object->fscache.n_in_progress,
+ object->fscache.n_exclusive);
+ printk(KERN_ERR "%sparent=%p\n",
+ prefix, object->fscache.parent);
+
+ spin_lock(&object->fscache.lock);
+ cookie = object->fscache.cookie;
+ if (cookie) {
+ printk(KERN_ERR "%scookie=%p [pr=%p nd=%p fl=%lx]\n",
+ prefix,
+ object->fscache.cookie,
+ object->fscache.cookie->parent,
+ object->fscache.cookie->netfs_data,
+ object->fscache.cookie->flags);
+ if (keybuf)
+ keylen = cookie->def->get_key(cookie->netfs_data, keybuf,
+ CACHEFILES_KEYBUF_SIZE);
+ else
+ keylen = 0;
+ } else {
+ printk(KERN_ERR "%scookie=NULL\n", prefix);
+ keylen = 0;
+ }
+ spin_unlock(&object->fscache.lock);
+
+ if (keylen) {
+ printk(KERN_ERR "%skey=[%u] '", prefix, keylen);
+ for (loop = 0; loop < keylen; loop++)
+ printk("%02x", keybuf[loop]);
+ printk("'\n");
+ }
+}
+
+/*
+ * dump debugging info about a pair of objects
+ */
+static noinline void cachefiles_printk_object(struct cachefiles_object *object,
+ struct cachefiles_object *xobject)
+{
+ u8 *keybuf;
+
+ keybuf = kmalloc(CACHEFILES_KEYBUF_SIZE, GFP_NOIO);
+ if (object)
+ __cachefiles_printk_object(object, "", keybuf);
+ if (xobject)
+ __cachefiles_printk_object(xobject, "x", keybuf);
+ kfree(keybuf);
}
/*
* record the fact that an object is now active
*/
-static void cachefiles_mark_object_active(struct cachefiles_cache *cache,
- struct cachefiles_object *object)
+static int cachefiles_mark_object_active(struct cachefiles_cache *cache,
+ struct cachefiles_object *object)
{
struct cachefiles_object *xobject;
struct rb_node **_p, *_parent = NULL;
@@ -42,8 +106,11 @@ static void cachefiles_mark_object_active(struct cachefiles_cache *cache,
try_again:
write_lock(&cache->active_lock);
- if (test_and_set_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags))
+ if (test_and_set_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags)) {
+ printk(KERN_ERR "CacheFiles: Error: Object already active\n");
+ cachefiles_printk_object(object, NULL);
BUG();
+ }
dentry = object->dentry;
_p = &cache->active_nodes.rb_node;
@@ -66,8 +133,8 @@ try_again:
rb_insert_color(&object->active_node, &cache->active_nodes);
write_unlock(&cache->active_lock);
- _leave("");
- return;
+ _leave(" = 0");
+ return 0;
/* an old object from a previous incarnation is hogging the slot - we
* need to wait for it to be destroyed */
@@ -76,44 +143,70 @@ wait_for_old_object:
printk(KERN_ERR "\n");
printk(KERN_ERR "CacheFiles: Error:"
" Unexpected object collision\n");
- printk(KERN_ERR "xobject: OBJ%x\n",
- xobject->fscache.debug_id);
- printk(KERN_ERR "xobjstate=%s\n",
- fscache_object_states[xobject->fscache.state]);
- printk(KERN_ERR "xobjflags=%lx\n", xobject->fscache.flags);
- printk(KERN_ERR "xobjevent=%lx [%lx]\n",
- xobject->fscache.events, xobject->fscache.event_mask);
- printk(KERN_ERR "xops=%u inp=%u exc=%u\n",
- xobject->fscache.n_ops, xobject->fscache.n_in_progress,
- xobject->fscache.n_exclusive);
- printk(KERN_ERR "xcookie=%p [pr=%p nd=%p fl=%lx]\n",
- xobject->fscache.cookie,
- xobject->fscache.cookie->parent,
- xobject->fscache.cookie->netfs_data,
- xobject->fscache.cookie->flags);
- printk(KERN_ERR "xparent=%p\n",
- xobject->fscache.parent);
- printk(KERN_ERR "object: OBJ%x\n",
- object->fscache.debug_id);
- printk(KERN_ERR "cookie=%p [pr=%p nd=%p fl=%lx]\n",
- object->fscache.cookie,
- object->fscache.cookie->parent,
- object->fscache.cookie->netfs_data,
- object->fscache.cookie->flags);
- printk(KERN_ERR "parent=%p\n",
- object->fscache.parent);
+ cachefiles_printk_object(object, xobject);
BUG();
}
atomic_inc(&xobject->usage);
write_unlock(&cache->active_lock);
- _debug(">>> wait");
- wait_on_bit(&xobject->flags, CACHEFILES_OBJECT_ACTIVE,
- cachefiles_wait_bit, TASK_UNINTERRUPTIBLE);
- _debug("<<< waited");
+ if (test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags)) {
+ wait_queue_head_t *wq;
+
+ signed long timeout = 60 * HZ;
+ wait_queue_t wait;
+ bool requeue;
+
+ /* if the object we're waiting for is queued for processing,
+ * then just put ourselves on the queue behind it */
+ if (slow_work_is_queued(&xobject->fscache.work)) {
+ _debug("queue OBJ%x behind OBJ%x immediately",
+ object->fscache.debug_id,
+ xobject->fscache.debug_id);
+ goto requeue;
+ }
+
+ /* otherwise we sleep until either the object we're waiting for
+ * is done, or the slow-work facility wants the thread back to
+ * do other work */
+ wq = bit_waitqueue(&xobject->flags, CACHEFILES_OBJECT_ACTIVE);
+ init_wait(&wait);
+ requeue = false;
+ do {
+ prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
+ if (!test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags))
+ break;
+ requeue = slow_work_sleep_till_thread_needed(
+ &object->fscache.work, &timeout);
+ } while (timeout > 0 && !requeue);
+ finish_wait(wq, &wait);
+
+ if (requeue &&
+ test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags)) {
+ _debug("queue OBJ%x behind OBJ%x after wait",
+ object->fscache.debug_id,
+ xobject->fscache.debug_id);
+ goto requeue;
+ }
+
+ if (timeout <= 0) {
+ printk(KERN_ERR "\n");
+ printk(KERN_ERR "CacheFiles: Error: Overlong"
+ " wait for old active object to go away\n");
+ cachefiles_printk_object(object, xobject);
+ goto requeue;
+ }
+ }
+
+ ASSERT(!test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags));
cache->cache.ops->put_object(&xobject->fscache);
goto try_again;
+
+requeue:
+ clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags);
+ cache->cache.ops->put_object(&xobject->fscache);
+ _leave(" = -ETIMEDOUT");
+ return -ETIMEDOUT;
}
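
The wait loop above is a standard lost-wakeup-safe pattern: register on the waitqueue with prepare_to_wait() before re-testing the bit, so a concurrent clear_bit()/wake_up_bit() cannot slip in between the test and the sleep. Distilled from the hunk into a standalone shape:

	wait_queue_head_t *wq = bit_waitqueue(&xobject->flags,
					      CACHEFILES_OBJECT_ACTIVE);
	signed long timeout = 60 * HZ;
	wait_queue_t wait;
	bool requeue = false;

	init_wait(&wait);
	do {
		/* queue first, test second: a wakeup arriving after the
		 * test but before the sleep is then still delivered */
		prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
		if (!test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags))
			break;
		requeue = slow_work_sleep_till_thread_needed(
			&object->fscache.work, &timeout);
	} while (timeout > 0 && !requeue);
	finish_wait(wq, &wait);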
/*
@@ -254,7 +347,7 @@ int cachefiles_delete_object(struct cachefiles_cache *cache,
dir = dget_parent(object->dentry);
- mutex_lock(&dir->d_inode->i_mutex);
+ mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT);
ret = cachefiles_bury_object(cache, dir, object->dentry);
dput(dir);
@@ -307,7 +400,7 @@ lookup_again:
/* search the current directory for the element name */
_debug("lookup '%s'", name);
- mutex_lock(&dir->d_inode->i_mutex);
+ mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT);
start = jiffies;
next = lookup_one_len(name, dir, nlen);
@@ -418,12 +511,15 @@ lookup_again:
}
/* note that we're now using this object */
- cachefiles_mark_object_active(cache, object);
+ ret = cachefiles_mark_object_active(cache, object);
mutex_unlock(&dir->d_inode->i_mutex);
dput(dir);
dir = NULL;
+ if (ret == -ETIMEDOUT)
+ goto mark_active_timed_out;
+
_debug("=== OBTAINED_OBJECT ===");
if (object->new) {
@@ -467,6 +563,10 @@ create_error:
cachefiles_io_error(cache, "Create/mkdir failed");
goto error;
+mark_active_timed_out:
+ _debug("mark active timed out");
+ goto release_dentry;
+
check_error:
_debug("check error %d", ret);
write_lock(&cache->active_lock);
@@ -474,7 +574,7 @@ check_error:
clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags);
wake_up_bit(&object->flags, CACHEFILES_OBJECT_ACTIVE);
write_unlock(&cache->active_lock);
-
+release_dentry:
dput(object->dentry);
object->dentry = NULL;
goto error_out;
@@ -495,9 +595,6 @@ error:
error_out2:
dput(dir);
error_out:
- if (ret == -ENOSPC)
- ret = -ENOBUFS;
-
_leave(" = error %d", -ret);
return ret;
}
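
The two mutex_lock_nested() hunks in this file are lockdep annotations rather than behavioural changes: each directory i_mutex is taken in the parent role of a parent/child pair, and I_MUTEX_PARENT stops lockdep from reporting the nesting as recursive locking. A minimal sketch of the convention:

	struct dentry *dir = dget_parent(victim);

	/* this i_mutex is the parent in a parent/child nesting */
	mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT);
	/* ... look up or unlink the child under the parent's lock ... */
	mutex_unlock(&dir->d_inode->i_mutex);
	dput(dir);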
diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
index a69787e7dd9..a6c8c6fe8df 100644
--- a/fs/cachefiles/rdwr.c
+++ b/fs/cachefiles/rdwr.c
@@ -11,6 +11,7 @@
#include <linux/mount.h>
#include <linux/file.h>
+#include <linux/ima.h>
#include "internal.h"
/*
@@ -40,8 +41,10 @@ static int cachefiles_read_waiter(wait_queue_t *wait, unsigned mode,
_debug("--- monitor %p %lx ---", page, page->flags);
- if (!PageUptodate(page) && !PageError(page))
- dump_stack();
+ if (!PageUptodate(page) && !PageError(page)) {
+ /* unlocked, not uptodate and not erroneous? */
+ _debug("page probably truncated");
+ }
/* remove from the waitqueue */
list_del(&wait->task_list);
@@ -61,6 +64,84 @@ static int cachefiles_read_waiter(wait_queue_t *wait, unsigned mode,
}
/*
+ * handle a probably truncated page
+ * - check to see if the page is still relevant and reissue the read if
+ * possible
+ * - return -EIO on error, -ENODATA if the page is gone, -EINPROGRESS if we
+ * must wait again and 0 if successful
+ */
+static int cachefiles_read_reissue(struct cachefiles_object *object,
+ struct cachefiles_one_read *monitor)
+{
+ struct address_space *bmapping = object->backer->d_inode->i_mapping;
+ struct page *backpage = monitor->back_page, *backpage2;
+ int ret;
+
+ kenter("{ino=%lx},{%lx,%lx}",
+ object->backer->d_inode->i_ino,
+ backpage->index, backpage->flags);
+
+ /* skip if the page was truncated away completely */
+ if (backpage->mapping != bmapping) {
+ kleave(" = -ENODATA [mapping]");
+ return -ENODATA;
+ }
+
+ backpage2 = find_get_page(bmapping, backpage->index);
+ if (!backpage2) {
+ kleave(" = -ENODATA [gone]");
+ return -ENODATA;
+ }
+
+ if (backpage != backpage2) {
+ put_page(backpage2);
+ kleave(" = -ENODATA [different]");
+ return -ENODATA;
+ }
+
+ /* the page is still there and we already have a ref on it, so we don't
+ * need a second */
+ put_page(backpage2);
+
+ INIT_LIST_HEAD(&monitor->op_link);
+ add_page_wait_queue(backpage, &monitor->monitor);
+
+ if (trylock_page(backpage)) {
+ ret = -EIO;
+ if (PageError(backpage))
+ goto unlock_discard;
+ ret = 0;
+ if (PageUptodate(backpage))
+ goto unlock_discard;
+
+ kdebug("reissue read");
+ ret = bmapping->a_ops->readpage(NULL, backpage);
+ if (ret < 0)
+ goto unlock_discard;
+ }
+
+ /* but the page may have been read before the monitor was installed, so
+ * the monitor may miss the event - so we have to ensure that we do get
+ * one in such a case */
+ if (trylock_page(backpage)) {
+ _debug("jumpstart %p {%lx}", backpage, backpage->flags);
+ unlock_page(backpage);
+ }
+
+ /* it'll reappear on the todo list */
+ kleave(" = -EINPROGRESS");
+ return -EINPROGRESS;
+
+unlock_discard:
+ unlock_page(backpage);
+ spin_lock_irq(&object->work_lock);
+ list_del(&monitor->op_link);
+ spin_unlock_irq(&object->work_lock);
+ kleave(" = %d", ret);
+ return ret;
+}
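+
The first three checks in cachefiles_read_reissue() amount to a reusable "is this page still the live one?" test: a fully truncated page loses its mapping, and a fresh lookup at the same index may find nothing, or a replacement page. Factored out as a sketch (the helper name is invented for illustration):

	static bool backpage_still_current(struct address_space *mapping,
					   struct page *page)
	{
		struct page *page2;

		if (page->mapping != mapping)
			return false;	/* truncated away completely */

		page2 = find_get_page(mapping, page->index);
		if (!page2)
			return false;	/* nothing at that index any more */

		put_page(page2);	/* the caller already holds a ref */
		return page2 == page;	/* false if replaced by a new page */
	}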
+
+/*
* copy data from backing pages to netfs pages to complete a read operation
* - driven by FS-Cache's thread pool
*/
@@ -92,20 +173,26 @@ static void cachefiles_read_copier(struct fscache_operation *_op)
_debug("- copy {%lu}", monitor->back_page->index);
- error = -EIO;
+ recheck:
if (PageUptodate(monitor->back_page)) {
copy_highpage(monitor->netfs_page, monitor->back_page);
pagevec_add(&pagevec, monitor->netfs_page);
fscache_mark_pages_cached(monitor->op, &pagevec);
error = 0;
- }
-
- if (error)
+ } else if (!PageError(monitor->back_page)) {
+ /* the page has probably been truncated */
+ error = cachefiles_read_reissue(object, monitor);
+ if (error == -EINPROGRESS)
+ goto next;
+ goto recheck;
+ } else {
cachefiles_io_error_obj(
object,
"Readpage failed on backing file %lx",
(unsigned long) monitor->back_page->flags);
+ error = -EIO;
+ }
page_cache_release(monitor->back_page);
@@ -114,6 +201,7 @@ static void cachefiles_read_copier(struct fscache_operation *_op)
fscache_put_retrieval(op);
kfree(monitor);
+ next:
/* let the thread pool have some air occasionally */
max--;
if (max < 0 || need_resched()) {
@@ -333,7 +421,8 @@ int cachefiles_read_or_alloc_page(struct fscache_retrieval *op,
shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;
- op->op.flags = FSCACHE_OP_FAST;
+ op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
+ op->op.flags |= FSCACHE_OP_FAST;
op->op.processor = cachefiles_read_copier;
pagevec_init(&pagevec, 0);
@@ -639,7 +728,8 @@ int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op,
pagevec_init(&pagevec, 0);
- op->op.flags = FSCACHE_OP_FAST;
+ op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
+ op->op.flags |= FSCACHE_OP_FAST;
op->op.processor = cachefiles_read_copier;
INIT_LIST_HEAD(&backpages);
@@ -801,7 +891,8 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
struct cachefiles_cache *cache;
mm_segment_t old_fs;
struct file *file;
- loff_t pos;
+ loff_t pos, eof;
+ size_t len;
void *data;
int ret;
@@ -832,18 +923,33 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
if (IS_ERR(file)) {
ret = PTR_ERR(file);
} else {
+ ima_counts_get(file);
ret = -EIO;
if (file->f_op->write) {
pos = (loff_t) page->index << PAGE_SHIFT;
+
+ /* we mustn't write more data than we have, so we have
+ * to beware of a partial page at EOF */
+ eof = object->fscache.store_limit_l;
+ len = PAGE_SIZE;
+ if (eof & ~PAGE_MASK) {
+ ASSERTCMP(pos, <, eof);
+ if (eof - pos < PAGE_SIZE) {
+ _debug("cut short %llx to %llx",
+ pos, eof);
+ len = eof - pos;
+ ASSERTCMP(pos + len, ==, eof);
+ }
+ }
+
data = kmap(page);
old_fs = get_fs();
set_fs(KERNEL_DS);
ret = file->f_op->write(
- file, (const void __user *) data, PAGE_SIZE,
- &pos);
+ file, (const void __user *) data, len, &pos);
set_fs(old_fs);
kunmap(page);
- if (ret != PAGE_SIZE)
+ if (ret != len)
ret = -EIO;
}
fput(file);
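
The length clamp added to cachefiles_write_page() stands alone as a small calculation: a store must never write past the object's store limit, so the final partial page is written short. A self-contained sketch of just that arithmetic, using the kernel's PAGE_SIZE/PAGE_MASK definitions:

	static size_t write_len_for_page(pgoff_t index, loff_t eof)
	{
		loff_t pos = (loff_t)index << PAGE_SHIFT;
		size_t len = PAGE_SIZE;

		/* only a non-page-aligned store limit can produce a
		 * partial final page */
		if ((eof & ~PAGE_MASK) && eof - pos < PAGE_SIZE)
			len = eof - pos;

		return len;
	}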