author    Chris Mason <chris.mason@oracle.com>  2008-07-21 10:29:44 -0400
committer Chris Mason <chris.mason@oracle.com>  2008-09-25 11:04:05 -0400
commit    4a09675279674041862d2210635b0cc1f60be28e
tree      19e4736c062f87729dcdc1bd57f4919b3227ec32  fs/btrfs/extent-tree.c
parent    e5a2217ef6ff088d08a27208929a6f9c635d672c
Btrfs: Data ordered fixes
* In btrfs_delete_inode, wait for ordered extents after calling
  truncate_inode_pages. This is much faster, and more correct.
* Properly clear out the PageChecked bit everywhere we redirty the page.
* Change the writepage fixup handler to lock the page range and check
  whether an ordered extent was inserted after the improperly dirtied
  page was discovered.
* Wait for ordered extents outside the transaction. This isn't required
  by the locking rules, but it does improve transaction latencies.
* Reduce contention on the alloc_mutex by dropping it while incrementing
  refs on a node/leaf and while dropping refs on a leaf.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
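For reference, the alloc_mutex change in btrfs_inc_ref boils down to taking the
mutex around each individual ref update instead of holding it across the whole
walk of a node or leaf. Below is a minimal user-space sketch of that pattern;
it uses pthreads and made-up names (fake_fs_info, fake_inc_extent_ref), not the
kernel API, and only the lock placement mirrors the diff that follows.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

struct fake_fs_info {
        pthread_mutex_t alloc_mutex;
};

/* stand-in for __btrfs_inc_extent_ref(); always succeeds here */
static int fake_inc_extent_ref(struct fake_fs_info *fs_info, u64 bytenr)
{
        (void)fs_info;
        (void)bytenr;
        return 0;
}

static int inc_refs_fine_grained(struct fake_fs_info *fs_info,
                                 const u64 *bytenrs, int nritems)
{
        int i;
        int ret;

        for (i = 0; i < nritems; i++) {
                if (bytenrs[i] == 0)
                        continue;

                /* take the mutex only around a single ref update ... */
                pthread_mutex_lock(&fs_info->alloc_mutex);
                ret = fake_inc_extent_ref(fs_info, bytenrs[i]);
                /* ... and drop it again before moving to the next item */
                pthread_mutex_unlock(&fs_info->alloc_mutex);

                if (ret)
                        return ret;
        }
        return 0;
}

int main(void)
{
        struct fake_fs_info fs = { .alloc_mutex = PTHREAD_MUTEX_INITIALIZER };
        u64 items[] = { 4096, 0, 8192 };

        printf("ret = %d\n", inc_refs_fine_grained(&fs, items, 3));
        return 0;
}

Shorter hold times let other tasks contending for alloc_mutex make progress
between items, at the cost of more lock/unlock traffic per tree block.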
Diffstat (limited to 'fs/btrfs/extent-tree.c')
 fs/btrfs/extent-tree.c | 18 +++++++++++++++---
 1 file changed, 15 insertions(+), 3 deletions(-)
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index be2aef1cb7a..ccd49322f79 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -934,7 +934,6 @@ int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
         if (!root->ref_cows)
                 return 0;
 
-        mutex_lock(&root->fs_info->alloc_mutex);
         level = btrfs_header_level(buf);
         nritems = btrfs_header_nritems(buf);
         for (i = 0; i < nritems; i++) {
@@ -951,29 +950,36 @@ int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                         disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
                         if (disk_bytenr == 0)
                                 continue;
+
+                        mutex_lock(&root->fs_info->alloc_mutex);
                         ret = __btrfs_inc_extent_ref(trans, root, disk_bytenr,
                                     btrfs_file_extent_disk_num_bytes(buf, fi),
                                     root->root_key.objectid, trans->transid,
                                     key.objectid, key.offset);
+                        mutex_unlock(&root->fs_info->alloc_mutex);
                         if (ret) {
                                 faili = i;
+                                WARN_ON(1);
                                 goto fail;
                         }
                 } else {
                         bytenr = btrfs_node_blockptr(buf, i);
                         btrfs_node_key_to_cpu(buf, &key, i);
+
+                        mutex_lock(&root->fs_info->alloc_mutex);
                         ret = __btrfs_inc_extent_ref(trans, root, bytenr,
                                            btrfs_level_size(root, level - 1),
                                            root->root_key.objectid,
                                            trans->transid,
                                            level - 1, key.objectid);
+                        mutex_unlock(&root->fs_info->alloc_mutex);
                         if (ret) {
                                 faili = i;
+                                WARN_ON(1);
                                 goto fail;
                         }
                 }
         }
-        mutex_unlock(&root->fs_info->alloc_mutex);
         return 0;
 fail:
         WARN_ON(1);
@@ -1004,7 +1010,6 @@ fail:
                 }
         }
 #endif
-        mutex_unlock(&root->fs_info->alloc_mutex);
         return ret;
 }
 
@@ -2180,6 +2185,8 @@ static int noinline drop_leaf_ref(struct btrfs_trans_handle *trans,
         leaf_owner = btrfs_header_owner(leaf);
         leaf_generation = btrfs_header_generation(leaf);
 
+        mutex_unlock(&root->fs_info->alloc_mutex);
+
         for (i = 0; i < nritems; i++) {
                 u64 disk_bytenr;
 
@@ -2197,12 +2204,17 @@ static int noinline drop_leaf_ref(struct btrfs_trans_handle *trans,
                 disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
                 if (disk_bytenr == 0)
                         continue;
+
+                mutex_lock(&root->fs_info->alloc_mutex);
                 ret = __btrfs_free_extent(trans, root, disk_bytenr,
                                 btrfs_file_extent_disk_num_bytes(leaf, fi),
                                 leaf_owner, leaf_generation,
                                 key.objectid, key.offset, 0);
+                mutex_unlock(&root->fs_info->alloc_mutex);
                 BUG_ON(ret);
         }
+
+        mutex_lock(&root->fs_info->alloc_mutex);
         return 0;
 }
 
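The drop_leaf_ref hunks imply a slightly different contract: the function is
entered with alloc_mutex held, drops it before scanning the leaf, re-takes it
only around each extent free, and acquires it once more before returning so
the caller still sees the mutex held. A rough user-space sketch of that
contract, again with hypothetical names and pthreads standing in for the
kernel mutex, not the btrfs code itself:

#include <pthread.h>
#include <stdint.h>

typedef uint64_t u64;

static pthread_mutex_t alloc_mutex = PTHREAD_MUTEX_INITIALIZER;

/* stand-in for __btrfs_free_extent(); always succeeds here */
static int fake_free_extent(u64 bytenr)
{
        (void)bytenr;
        return 0;
}

/*
 * Entered with alloc_mutex held; returns with alloc_mutex held.
 * The lock is dropped for the scan of the items and re-taken only
 * around each individual free.
 */
static int drop_leaf_refs_sketch(const u64 *bytenrs, int nritems)
{
        int i;
        int ret;

        pthread_mutex_unlock(&alloc_mutex);

        for (i = 0; i < nritems; i++) {
                if (bytenrs[i] == 0)
                        continue;

                pthread_mutex_lock(&alloc_mutex);
                ret = fake_free_extent(bytenrs[i]);
                pthread_mutex_unlock(&alloc_mutex);
                (void)ret;   /* the real code does BUG_ON(ret) here */
        }

        /* re-take the mutex so the caller sees it held, as on entry */
        pthread_mutex_lock(&alloc_mutex);
        return 0;
}

int main(void)
{
        u64 items[] = { 4096, 0, 8192 };

        pthread_mutex_lock(&alloc_mutex);       /* caller holds the lock */
        drop_leaf_refs_sketch(items, 3);
        pthread_mutex_unlock(&alloc_mutex);
        return 0;
}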