Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r--  mm/hugetlb.c  91
1 file changed, 75 insertions, 16 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index db861d8b6c2..51c9e2c0164 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -24,14 +24,15 @@
const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
static unsigned long nr_huge_pages, free_huge_pages, resv_huge_pages;
static unsigned long surplus_huge_pages;
+static unsigned long nr_overcommit_huge_pages;
unsigned long max_huge_pages;
+unsigned long sysctl_overcommit_huge_pages;
static struct list_head hugepage_freelists[MAX_NUMNODES];
static unsigned int nr_huge_pages_node[MAX_NUMNODES];
static unsigned int free_huge_pages_node[MAX_NUMNODES];
static unsigned int surplus_huge_pages_node[MAX_NUMNODES];
static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
unsigned long hugepages_treat_as_movable;
-unsigned long nr_overcommit_huge_pages;
static int hugetlb_next_nid;
/*
@@ -70,7 +71,25 @@ static void enqueue_huge_page(struct page *page)
free_huge_pages_node[nid]++;
}
-static struct page *dequeue_huge_page(struct vm_area_struct *vma,
+static struct page *dequeue_huge_page(void)
+{
+ int nid;
+ struct page *page = NULL;
+
+ for (nid = 0; nid < MAX_NUMNODES; ++nid) {
+ if (!list_empty(&hugepage_freelists[nid])) {
+ page = list_entry(hugepage_freelists[nid].next,
+ struct page, lru);
+ list_del(&page->lru);
+ free_huge_pages--;
+ free_huge_pages_node[nid]--;
+ break;
+ }
+ }
+ return page;
+}
+
+static struct page *dequeue_huge_page_vma(struct vm_area_struct *vma,
unsigned long address)
{
int nid;
@@ -119,6 +138,7 @@ static void free_huge_page(struct page *page)
struct address_space *mapping;
mapping = (struct address_space *) page_private(page);
+ set_page_private(page, 0);
BUG_ON(page_count(page));
INIT_LIST_HEAD(&page->lru);
@@ -133,7 +153,6 @@ static void free_huge_page(struct page *page)
spin_unlock(&hugetlb_lock);
if (mapping)
hugetlb_put_quota(mapping, 1);
- set_page_private(page, 0);
}
/*
@@ -267,6 +286,12 @@ static struct page *alloc_buddy_huge_page(struct vm_area_struct *vma,
spin_lock(&hugetlb_lock);
if (page) {
+ /*
+ * This page is now managed by the hugetlb allocator and has
+ * no users -- drop the buddy allocator's reference.
+ */
+ put_page_testzero(page);
+ VM_BUG_ON(page_count(page));
nid = page_to_nid(page);
set_compound_page_dtor(page, free_huge_page);
/*
@@ -295,8 +320,10 @@ static int gather_surplus_pages(int delta)
int needed, allocated;
needed = (resv_huge_pages + delta) - free_huge_pages;
- if (needed <= 0)
+ if (needed <= 0) {
+ resv_huge_pages += delta;
return 0;
+ }
allocated = 0;
INIT_LIST_HEAD(&surplus_list);
@@ -334,9 +361,12 @@ retry:
* The surplus_list now contains _at_least_ the number of extra pages
 * needed to accommodate the reservation. Add the appropriate number
* of pages to the hugetlb pool and free the extras back to the buddy
- * allocator.
+ * allocator. Commit the entire reservation here to prevent another
+ * process from stealing the pages as they are added to the pool but
+ * before they are reserved.
*/
needed += allocated;
+ resv_huge_pages += delta;
ret = 0;
free:
list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
@@ -345,13 +375,14 @@ free:
enqueue_huge_page(page);
else {
/*
- * Decrement the refcount and free the page using its
- * destructor. This must be done with hugetlb_lock
+ * The page has a reference count of zero already, so
+ * call free_huge_page directly instead of using
+ * put_page. This must be done with hugetlb_lock
* unlocked which is safe because free_huge_page takes
* hugetlb_lock before deciding how to free the page.
*/
spin_unlock(&hugetlb_lock);
- put_page(page);
+ free_huge_page(page);
spin_lock(&hugetlb_lock);
}
}
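Pulled out of the two hunks above, the ordering that closes the race is roughly this (a condensed sketch of gather_surplus_pages(), not a literal excerpt):

	spin_lock(&hugetlb_lock);
	needed = (resv_huge_pages + delta) - free_huge_pages;
	/*
	 * ... allocate 'needed' surplus pages onto surplus_list, dropping and
	 * retaking hugetlb_lock around each alloc_buddy_huge_page() call ...
	 */

	/* commit the whole reservation while the lock is still held ... */
	resv_huge_pages += delta;

	/*
	 * ... and only then publish the new pages on the free lists, so no
	 * other task can dequeue them before they are accounted as reserved
	 */
	list_for_each_entry_safe(page, tmp, &surplus_list, lru)
		enqueue_huge_page(page);
	spin_unlock(&hugetlb_lock);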
@@ -370,9 +401,20 @@ static void return_unused_surplus_pages(unsigned long unused_resv_pages)
struct page *page;
unsigned long nr_pages;
+ /*
+ * We want to release as many surplus pages as possible, spread
+ * evenly across all nodes. Iterate across all nodes until we
+ * can no longer free unreserved surplus pages. This occurs when
+ * the nodes with surplus pages have no free pages.
+ */
+ unsigned long remaining_iterations = num_online_nodes();
+
+ /* Uncommit the reservation */
+ resv_huge_pages -= unused_resv_pages;
+
nr_pages = min(unused_resv_pages, surplus_huge_pages);
- while (nr_pages) {
+ while (remaining_iterations-- && nr_pages) {
nid = next_node(nid, node_online_map);
if (nid == MAX_NUMNODES)
nid = first_node(node_online_map);
@@ -390,6 +432,7 @@ static void return_unused_surplus_pages(unsigned long unused_resv_pages)
surplus_huge_pages--;
surplus_huge_pages_node[nid]--;
nr_pages--;
+ remaining_iterations = num_online_nodes();
}
}
}
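The remaining_iterations logic above gives up only after a full pass over the online nodes without freeing anything. A standalone model of that termination rule (plain userspace C with invented per-node counts, not kernel code):

	#include <stdio.h>

	#define NODES 4

	int main(void)
	{
		/* invented example state: per-node free surplus pages */
		unsigned long free_surplus[NODES] = { 2, 0, 5, 0 };
		unsigned long to_free = 9;		/* unused reservation to give back */
		unsigned long remaining = NODES;	/* one full pass without progress ends the loop */
		int nid = NODES - 1;

		while (remaining-- && to_free) {
			nid = (nid + 1) % NODES;	/* next_node()/first_node() equivalent */
			if (free_surplus[nid]) {
				free_surplus[nid]--;
				to_free--;
				remaining = NODES;	/* made progress: reset the pass counter */
				printf("freed one surplus page from node %d\n", nid);
			}
		}
		printf("%lu pages could not be freed\n", to_free);
		return 0;
	}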
@@ -401,7 +444,7 @@ static struct page *alloc_huge_page_shared(struct vm_area_struct *vma,
struct page *page;
spin_lock(&hugetlb_lock);
- page = dequeue_huge_page(vma, addr);
+ page = dequeue_huge_page_vma(vma, addr);
spin_unlock(&hugetlb_lock);
return page ? page : ERR_PTR(-VM_FAULT_OOM);
}
@@ -416,7 +459,7 @@ static struct page *alloc_huge_page_private(struct vm_area_struct *vma,
spin_lock(&hugetlb_lock);
if (free_huge_pages > resv_huge_pages)
- page = dequeue_huge_page(vma, addr);
+ page = dequeue_huge_page_vma(vma, addr);
spin_unlock(&hugetlb_lock);
if (!page) {
page = alloc_buddy_huge_page(vma, addr);
@@ -569,7 +612,7 @@ static unsigned long set_max_huge_pages(unsigned long count)
min_count = max(count, min_count);
try_to_free_low(min_count);
while (min_count < persistent_huge_pages) {
- struct page *page = dequeue_huge_page(NULL, 0);
+ struct page *page = dequeue_huge_page();
if (!page)
break;
update_and_free_page(page);
@@ -605,6 +648,17 @@ int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
return 0;
}
+int hugetlb_overcommit_handler(struct ctl_table *table, int write,
+ struct file *file, void __user *buffer,
+ size_t *length, loff_t *ppos)
+{
+ proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
+ spin_lock(&hugetlb_lock);
+ nr_overcommit_huge_pages = sysctl_overcommit_huge_pages;
+ spin_unlock(&hugetlb_lock);
+ return 0;
+}
+
#endif /* CONFIG_SYSCTL */
int hugetlb_report_meminfo(char *buf)
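The new handler copies the sysctl value into nr_overcommit_huge_pages under hugetlb_lock. From userspace the knob is reached through procfs; a minimal sketch, assuming the standard mainline path /proc/sys/vm/nr_overcommit_hugepages and root privileges:

	#include <stdio.h>

	int main(void)
	{
		/* raise the ceiling on surplus huge pages to 64 */
		FILE *f = fopen("/proc/sys/vm/nr_overcommit_hugepages", "w");

		if (!f) {
			perror("nr_overcommit_hugepages");
			return 1;
		}
		fprintf(f, "64\n");
		fclose(f);
		return 0;
	}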
@@ -626,9 +680,11 @@ int hugetlb_report_node_meminfo(int nid, char *buf)
{
return sprintf(buf,
"Node %d HugePages_Total: %5u\n"
- "Node %d HugePages_Free: %5u\n",
+ "Node %d HugePages_Free: %5u\n"
+ "Node %d HugePages_Surp: %5u\n",
nid, nr_huge_pages_node[nid],
- nid, free_huge_pages_node[nid]);
+ nid, free_huge_pages_node[nid],
+ nid, surplus_huge_pages_node[nid]);
}
/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
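With the extra HugePages_Surp line, the per-node counters can be read back from sysfs. A small reader, assuming the usual /sys/devices/system/node/nodeN/meminfo layout (node 0 shown):

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char line[256];
		FILE *f = fopen("/sys/devices/system/node/node0/meminfo", "r");

		if (!f) {
			perror("node0/meminfo");
			return 1;
		}
		/* print HugePages_Total, HugePages_Free and the new HugePages_Surp */
		while (fgets(line, sizeof(line), f))
			if (strstr(line, "HugePages_"))
				fputs(line, stdout);
		fclose(f);
		return 0;
	}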
@@ -813,6 +869,7 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
spin_unlock(&mm->page_table_lock);
copy_huge_page(new_page, old_page, address, vma);
+ __SetPageUptodate(new_page);
spin_lock(&mm->page_table_lock);
ptep = huge_pte_offset(mm, address & HPAGE_MASK);
@@ -858,6 +915,7 @@ retry:
goto out;
}
clear_huge_page(page, address);
+ __SetPageUptodate(page);
if (vma->vm_flags & VM_SHARED) {
int err;
@@ -1191,12 +1249,13 @@ static int hugetlb_acct_memory(long delta)
if (gather_surplus_pages(delta) < 0)
goto out;
- if (delta > cpuset_mems_nr(free_huge_pages_node))
+ if (delta > cpuset_mems_nr(free_huge_pages_node)) {
+ return_unused_surplus_pages(delta);
goto out;
+ }
}
ret = 0;
- resv_huge_pages += delta;
if (delta < 0)
return_unused_surplus_pages((unsigned long) -delta);
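Putting the last hunk together with gather_surplus_pages(): the reservation is now committed inside gather_surplus_pages(), and the cpuset failure path has to hand the freshly gathered pages back instead of leaking them. Condensed flow (simplified from the hunks above, error labels elided):

	if (delta > 0) {
		/* on success this already did resv_huge_pages += delta */
		if (gather_surplus_pages(delta) < 0)
			goto out;

		if (delta > cpuset_mems_nr(free_huge_pages_node)) {
			/* undo the commit and return the surplus pages */
			return_unused_surplus_pages(delta);
			goto out;
		}
	}

	ret = 0;
	if (delta < 0)
		return_unused_surplus_pages((unsigned long) -delta);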