author	Anton Altaparmakov <aia21@cantab.net>	2005-09-08 22:13:02 +0100
committer	Anton Altaparmakov <aia21@cantab.net>	2005-09-08 22:13:02 +0100
commit	e604635c8bea16f6177e6133eb3efbfb4a029ef6 (patch)
tree	d0d7237d58ee4200123701c61ffb35a88872c04c /fs
parent	a01ac532b519dc0e0b4d8bc4e12373e4e4cd1b1a (diff)
NTFS: Improve scalability by changing the driver global spin lock in
fs/ntfs/aops.c::ntfs_end_buffer_async_read() to a bit spin lock in the
first buffer head of a page.

Signed-off-by: Anton Altaparmakov <aia21@cantab.net>
Diffstat (limited to 'fs')
-rw-r--r--	fs/ntfs/ChangeLog	3
-rw-r--r--	fs/ntfs/aops.c	15
2 files changed, 12 insertions, 6 deletions
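
For readers skimming the patch, the sketch below illustrates the per-page
locking pattern the driver switches to. It is a simplified, hypothetical
example (the function name example_end_buffer_read() and the omitted
NTFS-specific work are not part of this patch); the real change is in the
fs/ntfs/aops.c diff further down. Header placement of bit_spin_lock()
varies across kernel versions.

#include <linux/buffer_head.h>
#include <linux/spinlock.h>	/* bit_spin_lock() in kernels of this era */

/*
 * Hypothetical sketch only, not the driver's actual function.  Instead of
 * one driver-global spinlock, the read completion handler serialises on
 * the BH_Uptodate_Lock bit in the first buffer head attached to the page,
 * so completions on different pages no longer contend with each other.
 */
static void example_end_buffer_read(struct buffer_head *bh, int uptodate)
{
	struct page *page = bh->b_page;
	struct buffer_head *first, *tmp;
	unsigned long flags;

	if (likely(uptodate))
		set_buffer_uptodate(bh);
	else
		clear_buffer_uptodate(bh);

	/* Per-page lock: a bit spin lock in the first buffer head. */
	first = page_buffers(page);
	local_irq_save(flags);	/* the handler can run in interrupt context */
	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);

	clear_buffer_async_read(bh);
	unlock_buffer(bh);

	/* Walk the circular list of buffer heads attached to this page. */
	for (tmp = bh->b_this_page; tmp != bh; tmp = tmp->b_this_page) {
		if (buffer_async_read(tmp))
			goto still_busy;	/* other reads still in flight */
	}

	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	/*
	 * All reads for the page are done: NTFS would apply the post read
	 * mst fixups here, mark the page uptodate and unlock it.
	 */
	return;
still_busy:
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
}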
diff --git a/fs/ntfs/ChangeLog b/fs/ntfs/ChangeLog
index 029f856c56c..32a4150ebcb 100644
--- a/fs/ntfs/ChangeLog
+++ b/fs/ntfs/ChangeLog
@@ -86,6 +86,9 @@ ToDo/Notes:
 	- Fix fs/ntfs/aops.c::ntfs_{read,write}_block() to handle the case
 	  where a concurrent truncate has truncated the runlist under our feet.
 	- Fix page_has_buffers()/page_buffers() handling in fs/ntfs/aops.c.
+	- In fs/ntfs/aops.c::ntfs_end_buffer_async_read(), use a bit spin lock
+	  in the first buffer head instead of a driver global spin lock to
+	  improve scalability.
 
 2.1.23 - Implement extension of resident files and make writing safe as well as
 	many bug fixes, cleanups, and enhancements...
diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c
index 52e1aff98b0..950b686f02d 100644
--- a/fs/ntfs/aops.c
+++ b/fs/ntfs/aops.c
@@ -55,9 +55,8 @@
  */
 static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
 {
-	static DEFINE_SPINLOCK(page_uptodate_lock);
 	unsigned long flags;
-	struct buffer_head *tmp;
+	struct buffer_head *first, *tmp;
 	struct page *page;
 	ntfs_inode *ni;
 	int page_uptodate = 1;
@@ -89,11 +88,13 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
 		}
 	} else {
 		clear_buffer_uptodate(bh);
+		SetPageError(page);
 		ntfs_error(ni->vol->sb, "Buffer I/O error, logical block %llu.",
 				(unsigned long long)bh->b_blocknr);
-		SetPageError(page);
 	}
-	spin_lock_irqsave(&page_uptodate_lock, flags);
+	first = page_buffers(page);
+	local_irq_save(flags);
+	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
 	clear_buffer_async_read(bh);
 	unlock_buffer(bh);
 	tmp = bh;
@@ -108,7 +109,8 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
 		}
 		tmp = tmp->b_this_page;
 	} while (tmp != bh);
-	spin_unlock_irqrestore(&page_uptodate_lock, flags);
+	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+	local_irq_restore(flags);
 	/*
 	 * If none of the buffers had errors then we can set the page uptodate,
 	 * but we first have to perform the post read mst fixups, if the
@@ -141,7 +143,8 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
 	unlock_page(page);
 	return;
 still_busy:
-	spin_unlock_irqrestore(&page_uptodate_lock, flags);
+	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+	local_irq_restore(flags);
 	return;
 }
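
One detail worth calling out: bit_spin_lock() only spins on a state bit and,
unlike the spin_lock_irqsave() it replaces, does not disable interrupts
itself. Because ntfs_end_buffer_async_read() runs from I/O completion
(interrupt) context, the patch wraps the bit lock in an explicit
local_irq_save()/local_irq_restore() pair so a nested completion for another
buffer in the same page cannot deadlock on the same bit. The helpers below
are hypothetical (ntfs_page_bh_lock()/ntfs_page_bh_unlock() do not exist in
the driver); they merely package the sequence from the diff for illustration.

/* Hypothetical wrappers, shown only to illustrate the lock/unlock pairing. */
static inline struct buffer_head *ntfs_page_bh_lock(struct page *page,
		unsigned long *flags)
{
	struct buffer_head *first = page_buffers(page);

	/* Disable interrupts first; bit_spin_lock() does not do it for us. */
	local_irq_save(*flags);
	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
	return first;
}

static inline void ntfs_page_bh_unlock(struct buffer_head *first,
		unsigned long flags)
{
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
}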