Diffstat (limited to 'mm/memory_hotplug.c')
-rw-r--r--	mm/memory_hotplug.c	80
1 file changed, 71 insertions(+), 9 deletions(-)
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 833f854eabe..89fee2dcb03 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -62,9 +62,9 @@ static void release_memory_resource(struct resource *res)
#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
#ifndef CONFIG_SPARSEMEM_VMEMMAP
-static void get_page_bootmem(unsigned long info, struct page *page, int magic)
+static void get_page_bootmem(unsigned long info, struct page *page, int type)
{
- atomic_set(&page->_mapcount, magic);
+ atomic_set(&page->_mapcount, type);
SetPagePrivate(page);
set_page_private(page, info);
atomic_inc(&page->_count);
@@ -72,10 +72,10 @@ static void get_page_bootmem(unsigned long info, struct page *page, int magic)
void put_page_bootmem(struct page *page)
{
- int magic;
+ int type;
- magic = atomic_read(&page->_mapcount);
- BUG_ON(magic >= -1);
+ type = atomic_read(&page->_mapcount);
+ BUG_ON(type >= -1);
if (atomic_dec_return(&page->_count) == 1) {
ClearPagePrivate(page);
@@ -86,7 +86,7 @@ void put_page_bootmem(struct page *page)
}
-void register_page_bootmem_info_section(unsigned long start_pfn)
+static void register_page_bootmem_info_section(unsigned long start_pfn)
{
unsigned long *usemap, mapsize, section_nr, i;
struct mem_section *ms;
@@ -119,7 +119,7 @@ void register_page_bootmem_info_section(unsigned long start_pfn)
mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;
for (i = 0; i < mapsize; i++, page++)
- get_page_bootmem(section_nr, page, MIX_INFO);
+ get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
}
@@ -429,7 +429,9 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
if (need_zonelists_rebuild)
build_all_zonelists();
- vm_total_pages = nr_free_pagecache_pages();
+ else
+ vm_total_pages = nr_free_pagecache_pages();
+
writeback_set_ratelimit();
if (onlined_pages)
@@ -455,7 +457,7 @@ static pg_data_t *hotadd_new_pgdat(int nid, u64 start)
/* we can use NODE_DATA(nid) from here */
/* init node's zones as empty zones, we don't have any present pages.*/
- free_area_init_node(nid, pgdat, zones_size, start_pfn, zholes_size);
+ free_area_init_node(nid, zones_size, start_pfn, zholes_size);
return pgdat;
}
@@ -521,6 +523,66 @@ EXPORT_SYMBOL_GPL(add_memory);
#ifdef CONFIG_MEMORY_HOTREMOVE
/*
+ * A free page on the buddy free lists (not the per-cpu lists) has PageBuddy
+ * set and the size of the free page is given by page_order(). Using this,
+ * the function determines if the pageblock contains only free pages.
+ * Due to buddy constraints, a free page at least the size of a pageblock will
+ * be located at the start of the pageblock.
+ */
+static inline int pageblock_free(struct page *page)
+{
+ return PageBuddy(page) && page_order(page) >= pageblock_order;
+}
+
+/* Return the start of the next active pageblock after a given page */
+static struct page *next_active_pageblock(struct page *page)
+{
+ int pageblocks_stride;
+
+ /* Ensure the starting page is pageblock-aligned */
+ BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1));
+
+ /* Move forward by at least 1 * pageblock_nr_pages */
+ pageblocks_stride = 1;
+
+ /* If the entire pageblock is free, move to the end of free page */
+ if (pageblock_free(page))
+ pageblocks_stride += page_order(page) - pageblock_order;
+
+ return page + (pageblocks_stride * pageblock_nr_pages);
+}
+
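The stride computed by next_active_pageblock() can be illustrated outside the kernel. The toy program below is a standalone sketch, not part of the patch: PAGEBLOCK_ORDER, PAGEBLOCK_NR_PAGES and the free_page_order parameter are illustrative stand-ins for the kernel's pageblock_order, pageblock_nr_pages and page_order(). It mirrors the arithmetic above: a free buddy page of order 10 covers two pageblocks of 512 pages each, so the scan advances 1024 pages in one step.

#include <stdio.h>

/* Toy model of the stride computation in next_active_pageblock().
 * Constants are illustrative: 4 KiB pages, 2 MiB pageblocks.
 */
#define PAGEBLOCK_ORDER		9
#define PAGEBLOCK_NR_PAGES	(1UL << PAGEBLOCK_ORDER)

/* free_page_order stands in for page_order() of a free buddy page at
 * the start of the pageblock; a negative value means the block is not
 * headed by a free buddy page at all.
 */
static unsigned long stride_pages(int free_page_order)
{
	unsigned long stride = 1;	/* always skip at least one pageblock */

	/* Mirror pageblock_free(): only a free page spanning the whole
	 * pageblock lets us skip further ahead.
	 */
	if (free_page_order >= PAGEBLOCK_ORDER)
		stride += free_page_order - PAGEBLOCK_ORDER;

	return stride * PAGEBLOCK_NR_PAGES;
}

int main(void)
{
	/* An order-10 free buddy page spans two pageblocks, so the scan
	 * advances 1024 pages in a single step.
	 */
	printf("%lu\n", stride_pages(10));	/* 1024 */
	/* An order-3 free page does not cover the pageblock, so only the
	 * default stride of one pageblock (512 pages) applies.
	 */
	printf("%lu\n", stride_pages(3));	/* 512 */
	return 0;
}
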
+/* Checks if this range of memory is likely to be hot-removable. */
+int is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
+{
+ int type;
+ struct page *page = pfn_to_page(start_pfn);
+ struct page *end_page = page + nr_pages;
+
+ /* Check the starting page of each pageblock within the range */
+ for (; page < end_page; page = next_active_pageblock(page)) {
+ type = get_pageblock_migratetype(page);
+
+ /*
+ * A pageblock containing MOVABLE or free pages is considered
+ * removable.
+ */
+ if (type != MIGRATE_MOVABLE && !pageblock_free(page))
+ return 0;
+
+ /*
+ * A pageblock starting with a PageReserved page is not
+ * considered removable.
+ */
+ if (PageReserved(page))
+ return 0;
+ }
+
+ /* All pageblocks in the memory block are likely to be hot-removable */
+ return 1;
+}
+
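is_mem_section_removable() is meant to be consulted from the memory-hotplug sysfs code so userspace can see whether a memory block looks removable before attempting an offline. A minimal caller sketch follows; the function name show_mem_removable(), struct memory_block and its phys_index field are assumptions for illustration and are not part of this diff.

/* Sketch of a sysfs "removable" attribute handler: report whether the
 * section backing this memory block is likely to be hot-removable.
 */
static ssize_t show_mem_removable(struct sys_device *dev,
				  struct sysdev_attribute *attr, char *buf)
{
	struct memory_block *mem =
		container_of(dev, struct memory_block, sysdev);
	unsigned long start_pfn = section_nr_to_pfn(mem->phys_index);
	int removable = is_mem_section_removable(start_pfn,
						 PAGES_PER_SECTION);

	return sprintf(buf, "%d\n", removable);
}
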
+/*
 * Confirm that all pages in a range [start, end) belong to the same zone.
*/
static int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)