author     Hugh Dickins <hugh@veritas.com>                   2005-04-19 13:29:16 -0700
committer  Linus Torvalds <torvalds@ppc970.osdl.org.(none)>  2005-04-19 13:29:16 -0700
commit     3bf5ee95648c694bac4d13529563c230cd4fe5f2 (patch)
tree       9430e6e4f4c3d586ecb7375cd780fd17694888c7 /include
parent     ee39b37b23da0b6ec53a8ebe90ff41c016f8ae27 (diff)
[PATCH] freepgt: hugetlb_free_pgd_range
ia64 and ppc64 had hugetlb_free_pgtables functions which were no longer
being called, and it wasn't obvious what to do about them.

The ppc64 case turns out to be easy: the associated tables are noted
elsewhere and freed later, safe to either skip its hugetlb areas or go
through the motions of freeing nothing.  Since ia64 does need a special
case, restore to ppc64 the special case of skipping them.

The ia64 hugetlb case has been broken since pgd_addr_end went in, though
it probably appeared to work okay if you just had one such area; in fact
it's been broken much longer if you consider a long munmap spanning from
another region into the hugetlb region.

In the ia64 hugetlb region, more virtual address bits are available than
in the other regions, yet the page tables are structured the same way:
the page at the bottom is larger.  Here we need to scale down each addr
before passing it to the standard free_pgd_range.  Was about to write a
hugely_scaled_down macro, but found htlbpage_to_page already exists for
just this purpose.  Fixed off-by-one in ia64 is_hugepage_only_range.

Uninline free_pgd_range to make it available to ia64.  Make sure the
vma-gathering loop in free_pgtables cannot join a hugepage_only_range to
any other (safe to join huges?  probably but don't bother).

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
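The off-by-one fix is the most self-contained change here, so it is worth
seeing concretely: the old test looked at the first byte *past* the range,
so a mapping ending exactly at the top of the hugepage region was wrongly
judged to leave it.  A minimal userspace sketch, assuming the usual ia64
layout where REGION_NUMBER() takes the top three virtual-address bits and
the hugepage region is region 4 (simplified stand-ins, not the real
asm-ia64 macros):

#include <stdio.h>

/* simplified stand-ins for the asm-ia64/page.h macros */
#define REGION_NUMBER(addr)	((unsigned long)(addr) >> 61)
#define REGION_HPAGE		4UL

/* before the patch: tests the first byte past the range */
static int old_is_hugepage_only_range(unsigned long addr, unsigned long len)
{
	return REGION_NUMBER(addr) == REGION_HPAGE &&
	       REGION_NUMBER(addr + len) == REGION_HPAGE;
}

/* after the patch: tests the last byte inside the range */
static int new_is_hugepage_only_range(unsigned long addr, unsigned long len)
{
	return REGION_NUMBER(addr) == REGION_HPAGE &&
	       REGION_NUMBER(addr + len - 1) == REGION_HPAGE;
}

int main(void)
{
	unsigned long addr = REGION_HPAGE << 61;	/* base of region 4 */
	unsigned long len  = 1UL << 61;			/* the whole region */

	/* addr+len is the base of region 5, so the old check says 0
	 * for a range entirely inside the hugepage region */
	printf("old=%d new=%d\n",
	       old_is_hugepage_only_range(addr, len),
	       new_is_hugepage_only_range(addr, len));
	return 0;
}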
Diffstat (limited to 'include')
-rw-r--r--  include/asm-ia64/page.h     |  2
-rw-r--r--  include/asm-ia64/pgtable.h  |  4
-rw-r--r--  include/asm-ppc64/pgtable.h | 12
-rw-r--r--  include/linux/hugetlb.h     |  6
-rw-r--r--  include/linux/mm.h          |  4
5 files changed, 19 insertions, 9 deletions
diff --git a/include/asm-ia64/page.h b/include/asm-ia64/page.h
index 24aab801a8c..08894f73abf 100644
--- a/include/asm-ia64/page.h
+++ b/include/asm-ia64/page.h
@@ -139,7 +139,7 @@ typedef union ia64_va {
# define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
# define is_hugepage_only_range(mm, addr, len) \
(REGION_NUMBER(addr) == REGION_HPAGE && \
- REGION_NUMBER((addr)+(len)) == REGION_HPAGE)
+ REGION_NUMBER((addr)+(len)-1) == REGION_HPAGE)
extern unsigned int hpage_shift;
#endif
diff --git a/include/asm-ia64/pgtable.h b/include/asm-ia64/pgtable.h
index 1757a811f43..bbf6dd75700 100644
--- a/include/asm-ia64/pgtable.h
+++ b/include/asm-ia64/pgtable.h
@@ -472,8 +472,8 @@ extern struct page *zero_page_memmap_ptr;
#define HUGETLB_PGDIR_SIZE (__IA64_UL(1) << HUGETLB_PGDIR_SHIFT)
#define HUGETLB_PGDIR_MASK (~(HUGETLB_PGDIR_SIZE-1))
struct mmu_gather;
-extern void hugetlb_free_pgtables(struct mmu_gather *tlb,
- struct vm_area_struct * prev, unsigned long start, unsigned long end);
+void hugetlb_free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
+ unsigned long end, unsigned long floor, unsigned long ceiling);
#endif
/*
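The ia64 body behind this declaration lives in arch/ia64, outside this
include-only view, so the commit message only describes it: scale each
address down with htlbpage_to_page, then defer to the generic walker.
A hedged reconstruction of that idea, assuming kernel context; the
floor/ceiling region tests in particular are an assumption, not quoted
from the arch file:

/* Sketch of the ia64 side implied by the commit message: fold the
 * extra hugetlb virtual-address bits away, then reuse the generic
 * free_pgd_range(). */
void hugetlb_free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
			    unsigned long end, unsigned long floor,
			    unsigned long ceiling)
{
	/* hugetlb addresses are wider than the page table is deep,
	 * so scale each one down before the generic walk */
	addr = htlbpage_to_page(addr);
	end  = htlbpage_to_page(end);
	if (REGION_NUMBER(floor) == REGION_HPAGE)
		floor = htlbpage_to_page(floor);
	if (REGION_NUMBER(ceiling) == REGION_HPAGE)
		ceiling = htlbpage_to_page(ceiling);

	free_pgd_range(tlb, addr, end, floor, ceiling);
}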
diff --git a/include/asm-ppc64/pgtable.h b/include/asm-ppc64/pgtable.h
index 4c4824653e8..33b90e2aa47 100644
--- a/include/asm-ppc64/pgtable.h
+++ b/include/asm-ppc64/pgtable.h
@@ -500,9 +500,15 @@ extern pgd_t ioremap_dir[1024];
extern void paging_init(void);
-struct mmu_gather;
-void hugetlb_free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *prev,
- unsigned long start, unsigned long end);
+/*
+ * Because the huge pgtables are only 2 level, they can take
+ * at most around 4M, much less than one hugepage which the
+ * process is presumably entitled to use. So we don't bother
+ * freeing up the pagetables on unmap, and wait until
+ * destroy_context() to clean up the lot.
+ */
+#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) \
+ do { } while (0)
/*
* This gets called at the end of handling a page fault, when
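On ppc64 the hook is now a no-op, and the stub uses the kernel's standard
statement-macro idiom: expanding to do { } while (0) plus the caller's
semicolon yields exactly one statement, so the macro behaves like the
function call it replaces even in an un-braced if/else.  A small
illustration (hypothetical names, not from the patch):

#define nop_braces()	{ }			/* would break the if/else below */
#define nop_kernel()	do { } while (0)	/* always exactly one statement */

void demo(int cond)
{
	if (cond)
		nop_kernel();	/* expands to "do { } while (0);" */
	else
		return;

	/* nop_braces() would expand to "{ } ;" -- two statements --
	 * leaving the else without a matching if: a compile error. */
}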
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index ae45676d27b..6af1ae4a821 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -37,7 +37,8 @@ extern int sysctl_hugetlb_shm_group;
#ifndef ARCH_HAS_HUGEPAGE_ONLY_RANGE
#define is_hugepage_only_range(mm, addr, len) 0
-#define hugetlb_free_pgtables(tlb, prev, start, end) do { } while (0)
+#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) \
+ do { } while (0)
#endif
#ifndef ARCH_HAS_PREPARE_HUGEPAGE_RANGE
@@ -72,7 +73,8 @@ static inline unsigned long hugetlb_total_pages(void)
#define prepare_hugepage_range(addr, len) (-EINVAL)
#define pmd_huge(x) 0
#define is_hugepage_only_range(mm, addr, len) 0
-#define hugetlb_free_pgtables(tlb, prev, start, end) do { } while (0)
+#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) \
+ do { } while (0)
#define alloc_huge_page() ({ NULL; })
#define free_huge_page(p) ({ (void)(p); BUG(); })
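The two #ifndef blocks above supply every other configuration with no-ops:
the first covers architectures without a hugepage-only region, the second
covers !CONFIG_HUGETLB_PAGE builds.  An architecture opts out of the first
default by defining ARCH_HAS_HUGEPAGE_ONLY_RANGE in its own headers and
supplying real versions, as the asm-ia64 and asm-ppc64 hunks above do.  A
hypothetical arch header sketch (helper name illustrative):

#define ARCH_HAS_HUGEPAGE_ONLY_RANGE
#define is_hugepage_only_range(mm, addr, len) \
	my_arch_addr_in_htlb_region(addr, len)	/* hypothetical helper */
struct mmu_gather;
void hugetlb_free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
		unsigned long end, unsigned long floor, unsigned long ceiling);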
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 59eca28b5ae..c74a74ca401 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -587,7 +587,9 @@ unsigned long unmap_vmas(struct mmu_gather **tlb, struct mm_struct *mm,
struct vm_area_struct *start_vma, unsigned long start_addr,
unsigned long end_addr, unsigned long *nr_accounted,
struct zap_details *);
-void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *vma,
+void free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
+ unsigned long end, unsigned long floor, unsigned long ceiling);
+void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *start_vma,
unsigned long floor, unsigned long ceiling);
int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
struct vm_area_struct *vma);
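This mm.h change pairs with the commit message's last point: free_pgtables
now walks the vma list itself and must not join a hugepage-only range to
its neighbours.  A hedged sketch of how that loop plausibly dispatches,
simplified from the commit message rather than copied from mm/memory.c:

/* Sketch only: the real loop in mm/memory.c differs in detail. */
void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *vma,
		unsigned long floor, unsigned long ceiling)
{
	while (vma) {
		struct vm_area_struct *next = vma->vm_next;
		unsigned long addr = vma->vm_start;

		if (is_hugepage_only_range(vma->vm_mm, addr, HPAGE_SIZE)) {
			/* never merged with a neighbour: the arch hook
			 * sees exactly one hugepage-only range */
			hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
				floor, next ? next->vm_start : ceiling);
		} else {
			/* gather runs of ordinary vmas into one call */
			while (next && !is_hugepage_only_range(next->vm_mm,
						next->vm_start, HPAGE_SIZE)) {
				vma = next;
				next = vma->vm_next;
			}
			free_pgd_range(tlb, addr, vma->vm_end,
				floor, next ? next->vm_start : ceiling);
		}
		vma = next;
	}
}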