author    Mark Fasheh <mark.fasheh@oracle.com>  2006-12-11 11:06:36 -0800
committer Mark Fasheh <mark.fasheh@oracle.com>  2006-12-28 16:38:59 -0800
commit    7f4a2a97e324e8c826d1d983bc8efb5c59194f02
tree      649129bc73b4de1dee662250892acf883cf45ba2
parent    6c2aad0567e693f9588d0a0683f96ed872fb4641
ocfs2: always unmap in ocfs2_data_convert_worker()
Mmap-heavy clustered workloads were sometimes finding stale data on mmap
reads. The fix is to call unmap_mapping_range() on any downconvert of a
data lock.

Signed-off-by: Mark Fasheh <mark.fasheh@oracle.com>
-rw-r--r--  fs/ocfs2/dlmglue.c  10
1 file changed, 9 insertions(+), 1 deletion(-)
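Before the hunks, a condensed sketch of the downconvert flow this patch
establishes may help. This is not the kernel source: error handling and
the cluster-lock plumbing are elided, and the filemap_fdatawait() call in
the PR branch is an assumption inferred from the context lines below
("we only need to wait on the I/O if we're not also truncating pages").
The other helpers (unmap_mapping_range(), filemap_fdatawrite(),
sync_mapping_buffers(), truncate_inode_pages()) and the LKM_EXMODE
constant are the real symbols that appear in the diff.

/*
 * Sketch only: a simplified view of ocfs2_data_convert_worker()
 * after this patch.  See the hunks below for the real code.
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

static void data_downconvert_sketch(struct inode *inode, int blocking)
{
	struct address_space *mapping = inode->i_mapping;

	/*
	 * Tear down all user mappings first.  Walking the PTEs moves
	 * any hardware dirty bits onto the page cache pages, so the
	 * writeback below also sees data dirtied through mmap().
	 */
	unmap_mapping_range(mapping, 0, 0, 0);

	filemap_fdatawrite(mapping);   /* start writeback of dirty pages */
	sync_mapping_buffers(mapping); /* flush associated metadata buffers */

	if (blocking == LKM_EXMODE)
		/* Another node wants EX: drop our cached pages entirely. */
		truncate_inode_pages(mapping, 0);
	else
		/* PR downconvert: keep the pages, just wait for the I/O
		 * (assumed; inferred from the comment in the second hunk). */
		filemap_fdatawait(mapping);
}

The cost, as the new comment in the patch notes, is that even EX->PR
downconverts lose their mappings and must fault them back in; the benefit
is that mmap-dirtied data is written back before another node can read
stale blocks.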
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index e6220137bf6..e335541727f 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -2718,6 +2718,15 @@ static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres,
 	inode = ocfs2_lock_res_inode(lockres);
 	mapping = inode->i_mapping;
 
+	/*
+	 * We need this before the filemap_fdatawrite() so that it can
+	 * transfer the dirty bit from the PTE to the
+	 * page. Unfortunately this means that even for EX->PR
+	 * downconverts, we'll lose our mappings and have to build
+	 * them up again.
+	 */
+	unmap_mapping_range(mapping, 0, 0, 0);
+
 	if (filemap_fdatawrite(mapping)) {
 		mlog(ML_ERROR, "Could not sync inode %llu for downconvert!",
 		     (unsigned long long)OCFS2_I(inode)->ip_blkno);
@@ -2725,7 +2734,6 @@ static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres,
 	sync_mapping_buffers(mapping);
 	if (blocking == LKM_EXMODE) {
 		truncate_inode_pages(mapping, 0);
-		unmap_mapping_range(mapping, 0, 0, 0);
 	} else {
 		/* We only need to wait on the I/O if we're not also
 		 * truncating pages because truncate_inode_pages waits
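One detail worth spelling out for readers new to unmap_mapping_range():
the three zero arguments are not magic. A hedged gloss of the call the
patch adds, based on the function's documented semantics:

/*
 * unmap_mapping_range(mapping, holebegin, holelen, even_cows)
 *
 *   holebegin = 0, holelen = 0  =>  unmap from offset 0 to EOF
 *   even_cows = 0               =>  leave private COW pages alone
 *
 * So the call this patch adds tears down every shared mapping of
 * the entire file, which is what a cluster downconvert requires.
 */
unmap_mapping_range(mapping, 0, 0, 0);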