author    Linus Torvalds <torvalds@g5.osdl.org>  2005-10-16 17:36:06 -0700
committer Linus Torvalds <torvalds@g5.osdl.org>  2005-10-16 17:36:06 -0700
commit    3d80636a0d5f056ffc26472d05b6027a7a9f6e1c (patch)
tree      39b3dbae53938728c55b2312190561b858431d92 /mm/vmscan.c
parent    688ce17b8599abc548b406c00e4d18ae0dec954f (diff)
Fix memory ordering bug in page reclaim
As noticed by Nick Piggin, we need to make sure that we check the page count before we check for PageDirty, since the dirty check is only valid if the count implies that we're the only possible ones holding the page.

We always did do this, but the code needs a read memory barrier to make sure that the ordering is also honored by the CPU. (The writer side is ordered due to the atomic decrement-and-test on the page count; see the discussion on linux-kernel.)

Signed-off-by: Linus Torvalds <torvalds@osdl.org>
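To make the ordering requirement concrete, here is a minimal userspace sketch of the same reader/writer pairing using C11 atomics. It is an illustrative analogue, not the kernel code: the struct and function names are invented, a fetch_sub with release ordering stands in for the atomic decrement-and-test on the page count, and the acquire fence stands in for the smp_rmb() added in the patch below.

/*
 * Illustrative analogue only: "fake_page", "writer_release" and
 * "reader_can_free" are invented names, not kernel APIs.
 */
#include <stdatomic.h>
#include <stdbool.h>

struct fake_page {
	atomic_int  count;	/* stands in for page_count() */
	atomic_bool dirty;	/* stands in for PageDirty()  */
};

/*
 * Writer side: mark the page clean, then drop the reference.
 * The release ordering on the decrement plays the role of the
 * ordering provided by the atomic decrement-and-test.
 */
static bool writer_release(struct fake_page *p)
{
	atomic_store_explicit(&p->dirty, false, memory_order_relaxed);
	return atomic_fetch_sub_explicit(&p->count, 1,
					 memory_order_release) == 1;
}

/*
 * Reader side (reclaim): only trust "dirty" once the count says the
 * pagecache and we are the only holders, and only after the acquire
 * fence -- the analogue of the smp_rmb() in the patch.
 */
static bool reader_can_free(struct fake_page *p)
{
	if (atomic_load_explicit(&p->count, memory_order_relaxed) != 2)
		return false;
	atomic_thread_fence(memory_order_acquire);	/* smp_rmb() */
	return !atomic_load_explicit(&p->dirty, memory_order_relaxed);
}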
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--  mm/vmscan.c  13
1 file changed, 9 insertions, 4 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 0ea71e887bb..64f9570cff5 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -511,10 +511,11 @@ static int shrink_list(struct list_head *page_list, struct scan_control *sc)
 		 * PageDirty _after_ making sure that the page is freeable and
 		 * not in use by anybody. (pagecache + us == 2)
 		 */
-		if (page_count(page) != 2 || PageDirty(page)) {
-			write_unlock_irq(&mapping->tree_lock);
-			goto keep_locked;
-		}
+		if (unlikely(page_count(page) != 2))
+			goto cannot_free;
+		smp_rmb();
+		if (unlikely(PageDirty(page)))
+			goto cannot_free;
 
 #ifdef CONFIG_SWAP
 		if (PageSwapCache(page)) {
@@ -538,6 +539,10 @@ free_it:
 			__pagevec_release_nonlru(&freed_pvec);
 		continue;
 
+cannot_free:
+		write_unlock_irq(&mapping->tree_lock);
+		goto keep_locked;
+
 activate_locked:
 		SetPageActive(page);
 		pgactivate++;