author	Mel Gorman <mel@csn.ul.ie>	2007-10-16 01:25:59 -0700
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-16 09:43:00 -0700
commit	64c5e135bf5a2a7f0ededb3435a31adbe0202f0c (patch)
tree	cb4ff93cbcc3c27176723419a313d7c53545d36b /mm
parent	ac0e5b7a6b93fb291b01fe1e951e3c16bcdd3503 (diff)
don't group high order atomic allocations
Grouping high-order atomic allocations together was intended to allow bursty users of atomic allocations, such as e1000, to work in situations where their preallocated buffers were depleted. This did not work in at least one case with a wireless network adapter needing order-1 allocations frequently. To resolve that, the free pages used for min_free_kbytes were moved to separate contiguous blocks with the patch bias-the-location-of-pages-freed-for-min_free_kbytes-in-the-same-max_order_nr_pages-blocks.

It is felt that keeping the free pages in the same contiguous blocks should be sufficient for bursty short-lived high-order atomic allocations to succeed, maybe even with the e1000. Even if there is a failure, increasing the value of min_free_kbytes will free pages as contiguous blocks, in contrast to the standard buddy allocator which makes no attempt to keep the minimum number of free pages contiguous.

This patch backs out grouping high-order atomic allocations together to determine whether it is really needed or not. If a new report comes in about high-order atomic allocations failing, the feature can be reintroduced to determine whether it fixes the problem. As a side-effect, this patch reduces by 1 the number of bits required to track the mobility type of pages within a MAX_ORDER_NR_PAGES block.

Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Acked-by: Andy Whitcroft <apw@shadowen.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--  mm/page_alloc.c  37
1 file changed, 7 insertions(+), 30 deletions(-)
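
For reference, the mobility clustering that remains after this patch is the two-bit mapping visible in the first hunk below: __GFP_MOVABLE selects bit 1 and __GFP_RECLAIMABLE selects bit 0 of the migratetype. The following is a minimal standalone sketch of that mapping; the flag values are illustrative stand-ins, not the real definitions from include/linux/gfp.h and include/linux/mmzone.h.

#include <stdio.h>

/* Illustrative stand-ins; the real values live in include/linux/gfp.h
 * and include/linux/mmzone.h and differ from these. */
#define __GFP_RECLAIMABLE 0x01u
#define __GFP_MOVABLE     0x02u

enum {
	MIGRATE_UNMOVABLE,	/* 0b00: neither flag set */
	MIGRATE_RECLAIMABLE,	/* 0b01: __GFP_RECLAIMABLE */
	MIGRATE_MOVABLE,	/* 0b10: __GFP_MOVABLE */
};

/* Mirrors the post-patch allocflags_to_migratetype(): the order
 * argument is gone because high-order atomic allocations are no
 * longer clustered separately. */
static int allocflags_to_migratetype(unsigned int gfp_flags)
{
	return (((gfp_flags & __GFP_MOVABLE) != 0) << 1) |
		((gfp_flags & __GFP_RECLAIMABLE) != 0);
}

int main(void)
{
	printf("%d\n", allocflags_to_migratetype(0));			/* 0 */
	printf("%d\n", allocflags_to_migratetype(__GFP_RECLAIMABLE));	/* 1 */
	printf("%d\n", allocflags_to_migratetype(__GFP_MOVABLE));	/* 2 */
	return 0;
}
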
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 8aec4d4601e..ac8fc51825b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -174,18 +174,13 @@ static void set_pageblock_migratetype(struct page *page, int migratetype)
PB_migrate, PB_migrate_end);
}
-static inline int allocflags_to_migratetype(gfp_t gfp_flags, int order)
+static inline int allocflags_to_migratetype(gfp_t gfp_flags)
{
WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
if (unlikely(page_group_by_mobility_disabled))
return MIGRATE_UNMOVABLE;
- /* Cluster high-order atomic allocations together */
- if (unlikely(order > 0) &&
- (!(gfp_flags & __GFP_WAIT) || in_interrupt()))
- return MIGRATE_HIGHATOMIC;
-
/* Cluster based on mobility */
return (((gfp_flags & __GFP_MOVABLE) != 0) << 1) |
((gfp_flags & __GFP_RECLAIMABLE) != 0);
@@ -706,11 +701,10 @@ static struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
* the free lists for the desirable migrate type are depleted
*/
static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
- [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_HIGHATOMIC, MIGRATE_RESERVE },
- [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_HIGHATOMIC, MIGRATE_RESERVE },
- [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_HIGHATOMIC, MIGRATE_RESERVE },
- [MIGRATE_HIGHATOMIC] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE },
- [MIGRATE_RESERVE] = { MIGRATE_RESERVE, MIGRATE_RESERVE, MIGRATE_RESERVE, MIGRATE_RESERVE }, /* Never used */
+ [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE },
+ [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE },
+ [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
+ [MIGRATE_RESERVE] = { MIGRATE_RESERVE, MIGRATE_RESERVE, MIGRATE_RESERVE }, /* Never used */
};
/*
@@ -804,9 +798,7 @@ static struct page *__rmqueue_fallback(struct zone *zone, int order,
int current_order;
struct page *page;
int migratetype, i;
- int nonatomic_fallback_atomic = 0;
-retry:
/* Find the largest possible block of pages in the other list */
for (current_order = MAX_ORDER-1; current_order >= order;
--current_order) {
@@ -816,14 +808,6 @@ retry:
/* MIGRATE_RESERVE handled later if necessary */
if (migratetype == MIGRATE_RESERVE)
continue;
- /*
- * Make it hard to fallback to blocks used for
- * high-order atomic allocations
- */
- if (migratetype == MIGRATE_HIGHATOMIC &&
- start_migratetype != MIGRATE_UNMOVABLE &&
- !nonatomic_fallback_atomic)
- continue;
area = &(zone->free_area[current_order]);
if (list_empty(&area->free_list[migratetype]))
@@ -849,8 +833,7 @@ retry:
start_migratetype);
/* Claim the whole block if over half of it is free */
- if ((pages << current_order) >= (1 << (MAX_ORDER-2)) &&
- migratetype != MIGRATE_HIGHATOMIC)
+ if ((pages << current_order) >= (1 << (MAX_ORDER-2)))
set_pageblock_migratetype(page,
start_migratetype);
@@ -872,12 +855,6 @@ retry:
}
}
- /* Allow fallback to high-order atomic blocks if memory is that low */
- if (!nonatomic_fallback_atomic) {
- nonatomic_fallback_atomic = 1;
- goto retry;
- }
-
/* Use MIGRATE_RESERVE rather than fail an allocation */
return __rmqueue_smallest(zone, order, MIGRATE_RESERVE);
}
@@ -1112,7 +1089,7 @@ static struct page *buffered_rmqueue(struct zonelist *zonelist,
struct page *page;
int cold = !!(gfp_flags & __GFP_COLD);
int cpu;
- int migratetype = allocflags_to_migratetype(gfp_flags, order);
+ int migratetype = allocflags_to_migratetype(gfp_flags);
again:
cpu = get_cpu();
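
To make the control flow of the simplified __rmqueue_fallback() easier to follow, here is a hypothetical toy model of the post-patch walk. Per-(order, migratetype) free counts stand in for zone->free_area lists, and the final MIGRATE_RESERVE pass approximates what __rmqueue_smallest(zone, order, MIGRATE_RESERVE) does in the real code. Everything not named in the patch (TOY_MAX_ORDER, free_count, toy_rmqueue_fallback, the scenario in main) is invented for illustration.

#include <stdio.h>

#define TOY_MAX_ORDER 4	/* stand-in for MAX_ORDER; the real value is larger */

enum {
	MIGRATE_UNMOVABLE,
	MIGRATE_RECLAIMABLE,
	MIGRATE_MOVABLE,
	MIGRATE_RESERVE,
	MIGRATE_TYPES,
};

/* The post-patch fallback table: MIGRATE_HIGHATOMIC is gone, so each
 * type lists only three alternatives. */
static const int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES - 1] = {
	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_RESERVE },
	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_RESERVE },
	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
	[MIGRATE_RESERVE]     = { MIGRATE_RESERVE, MIGRATE_RESERVE, MIGRATE_RESERVE }, /* Never used */
};

/* Free blocks per (order, migratetype); stands in for zone->free_area. */
static int free_count[TOY_MAX_ORDER][MIGRATE_TYPES];

/* Returns the migratetype that satisfied the request, or -1 on failure. */
static int toy_rmqueue_fallback(int order, int start_migratetype)
{
	/* Largest blocks first, as in the real loop, so a stolen block
	 * leaves a large remainder for the requesting migratetype. */
	for (int current_order = TOY_MAX_ORDER - 1; current_order >= order;
			current_order--) {
		for (int i = 0; i < MIGRATE_TYPES - 1; i++) {
			int migratetype = fallbacks[start_migratetype][i];

			/* MIGRATE_RESERVE handled later if necessary */
			if (migratetype == MIGRATE_RESERVE)
				continue;
			if (free_count[current_order][migratetype] == 0)
				continue;
			free_count[current_order][migratetype]--;
			return migratetype;
		}
	}

	/* Use MIGRATE_RESERVE rather than fail the allocation; the real
	 * code calls __rmqueue_smallest(zone, order, MIGRATE_RESERVE),
	 * which searches the smallest sufficient orders first. */
	for (int current_order = order; current_order < TOY_MAX_ORDER;
			current_order++) {
		if (free_count[current_order][MIGRATE_RESERVE] > 0) {
			free_count[current_order][MIGRATE_RESERVE]--;
			return MIGRATE_RESERVE;
		}
	}
	return -1;
}

int main(void)
{
	/* Only a movable order-3 block is free: an unmovable order-0
	 * request must fall back to it. */
	free_count[3][MIGRATE_MOVABLE] = 1;
	printf("served from migratetype %d\n",
	       toy_rmqueue_fallback(0, MIGRATE_UNMOVABLE)); /* prints 2 */
	return 0;
}

Note how the single pass over fallbacks[] replaces the pre-patch retry: with MIGRATE_HIGHATOMIC removed there is no longer a class of blocks that is "hard to fall back to", so the nonatomic_fallback_atomic flag and the retry label become unnecessary.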