path: root/mm/page_alloc.c
author     Christoph Lameter <clameter@sgi.com>   2006-09-25 23:31:48 -0700
committer  Linus Torvalds <torvalds@g5.osdl.org>  2006-09-26 08:48:51 -0700
commit     006d22d9bbb7e66279ba5cc4556b54eeaf8fd556 (patch)
tree       5af5a6676af234db8836bb1e3ef71e6cf8ccb0a9 /mm/page_alloc.c
parent     46a82b2d5591335277ed2930611f6acb4ce654ed (diff)
[PATCH] Optimize free_one_page
free_one_page() currently adds the page to a fake list and calls free_pages_bulk(). free_pages_bulk() takes it off again and then calls __free_one_page(). Make free_one_page() go directly to __free_one_page(). Saves the list on/off and a temporary list in free_one_page() for higher order pages.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
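For illustration only, here is a minimal userspace sketch of the before/after shape of this change. This is not kernel code: struct list_head, list_add(), list_del(), struct page and __free_one_page() below are simplified stand-ins for the kernel's list and buddy-allocator machinery, and the zone locking and all_unreclaimable/pages_scanned bookkeeping visible in the diff are omitted for brevity.

/*
 * Userspace sketch: old path builds a one-entry list and has the "bulk"
 * helper unlink it again; new path goes straight to the merge step.
 */
#include <stdio.h>
#include <stddef.h>

struct list_head {
	struct list_head *next, *prev;
};

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void list_add(struct list_head *new, struct list_head *head)
{
	new->next = head->next;
	new->prev = head;
	head->next->prev = new;
	head->next = new;
}

static void list_del(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
}

struct page {
	struct list_head lru;
	int id;
};

/* Stand-in for the real buddy merge step. */
static void __free_one_page(struct page *page, int order)
{
	printf("__free_one_page: page %d, order %d\n", page->id, order);
}

/* Old path: put the page on a temporary list, then take it off again. */
static void free_one_page_old(struct page *page, int order)
{
	struct list_head list;

	list_init(&list);
	list_add(&page->lru, &list);		/* list on */

	/* what the bulk helper effectively did for a count of one */
	struct page *p = (struct page *)((char *)list.next -
					 offsetof(struct page, lru));
	list_del(&p->lru);			/* list off */
	__free_one_page(p, order);
}

/* New path: no temporary list at all. */
static void free_one_page_new(struct page *page, int order)
{
	__free_one_page(page, order);
}

int main(void)
{
	struct page a = { .id = 1 }, b = { .id = 2 };

	free_one_page_old(&a, 0);
	free_one_page_new(&b, 0);
	return 0;
}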
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c  8
1 file changed, 5 insertions, 3 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e8a71657ac4..cc648304756 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -448,9 +448,11 @@ static void free_pages_bulk(struct zone *zone, int count,
 
 static void free_one_page(struct zone *zone, struct page *page, int order)
 {
-	LIST_HEAD(list);
-	list_add(&page->lru, &list);
-	free_pages_bulk(zone, 1, &list, order);
+	spin_lock(&zone->lock);
+	zone->all_unreclaimable = 0;
+	zone->pages_scanned = 0;
+	__free_one_page(page, zone ,order);
+	spin_unlock(&zone->lock);
 }
 
 static void __free_pages_ok(struct page *page, unsigned int order)