aboutsummaryrefslogtreecommitdiff
path: root/fs/btrfs/transaction.c
diff options
context:
space:
mode:
authorChris Mason <chris.mason@oracle.com>2008-07-29 16:15:18 -0400
committerChris Mason <chris.mason@oracle.com>2008-09-25 11:04:05 -0400
commitab78c84de1ce4db1b2a2cef361625ad80abbab3f (patch)
treea9726645832b50da1da9a9690c4200c0f198ce7d /fs/btrfs/transaction.c
parent1a3f5d0400d786aec41ede15fec5710ad1a1d18b (diff)
Btrfs: Throttle operations if the reference cache gets too large
A large reference cache is directly related to a lot of work pending for the cleaner thread. This throttles back new operations based on the size of the reference cache so the cleaner thread will be able to keep up. Overall, this actually makes the FS faster because the cleaner thread will be more likely to find things in cache. Signed-off-by: Chris Mason <chris.mason@oracle.com>
Diffstat (limited to 'fs/btrfs/transaction.c')
-rw-r--r--fs/btrfs/transaction.c59
1 file changed, 44 insertions, 15 deletions
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index fcef3cae0c9..b8be6703189 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -202,35 +202,64 @@ static noinline int wait_for_commit(struct btrfs_root *root,
return 0;
}
+void btrfs_throttle(struct btrfs_root *root)
+{
+ struct btrfs_fs_info *info = root->fs_info;
+
+harder:
+ if (atomic_read(&info->throttles)) {
+ DEFINE_WAIT(wait);
+ int thr;
+ int harder_count = 0;
+ thr = atomic_read(&info->throttle_gen);
+
+ do {
+ prepare_to_wait(&info->transaction_throttle,
+ &wait, TASK_UNINTERRUPTIBLE);
+ if (!atomic_read(&info->throttles)) {
+ finish_wait(&info->transaction_throttle, &wait);
+ break;
+ }
+ schedule();
+ finish_wait(&info->transaction_throttle, &wait);
+ } while (thr == atomic_read(&info->throttle_gen));
+
+ if (harder_count < 5 &&
+ info->total_ref_cache_size > 5 * 1024 * 1024) {
+ harder_count++;
+ goto harder;
+ }
+
+ if (harder_count < 10 &&
+ info->total_ref_cache_size > 10 * 1024 * 1024) {
+ harder_count++;
+ goto harder;
+ }
+ }
+}
+
static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
struct btrfs_root *root, int throttle)
{
struct btrfs_transaction *cur_trans;
+ struct btrfs_fs_info *info = root->fs_info;
- mutex_lock(&root->fs_info->trans_mutex);
- cur_trans = root->fs_info->running_transaction;
+ mutex_lock(&info->trans_mutex);
+ cur_trans = info->running_transaction;
WARN_ON(cur_trans != trans->transaction);
WARN_ON(cur_trans->num_writers < 1);
cur_trans->num_writers--;
if (waitqueue_active(&cur_trans->writer_wait))
wake_up(&cur_trans->writer_wait);
-
- if (throttle && atomic_read(&root->fs_info->throttles)) {
- DEFINE_WAIT(wait);
- mutex_unlock(&root->fs_info->trans_mutex);
- prepare_to_wait(&root->fs_info->transaction_throttle, &wait,
- TASK_UNINTERRUPTIBLE);
- if (atomic_read(&root->fs_info->throttles))
- schedule();
- finish_wait(&root->fs_info->transaction_throttle, &wait);
- mutex_lock(&root->fs_info->trans_mutex);
- }
-
put_transaction(cur_trans);
- mutex_unlock(&root->fs_info->trans_mutex);
+ mutex_unlock(&info->trans_mutex);
memset(trans, 0, sizeof(*trans));
kmem_cache_free(btrfs_trans_handle_cachep, trans);
+
+ if (throttle)
+ btrfs_throttle(root);
+
return 0;
}