 arch/powerpc/mm/hugetlbpage.c | 21 +++++++++++++++++++++
 1 file changed, 21 insertions(+), 0 deletions(-)
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index d0ec887f05a..1f07f70ac89 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -566,6 +566,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 	if (len > TASK_SIZE)
 		return -ENOMEM;
 
+	/* handle fixed mapping: prevent overlap with huge pages */
+	if (flags & MAP_FIXED) {
+		if (is_hugepage_only_range(mm, addr, len))
+			return -EINVAL;
+		return addr;
+	}
+
 	if (addr) {
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(mm, addr);
@@ -641,6 +648,13 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 	if (len > TASK_SIZE)
 		return -ENOMEM;
 
+	/* handle fixed mapping: prevent overlap with huge pages */
+	if (flags & MAP_FIXED) {
+		if (is_hugepage_only_range(mm, addr, len))
+			return -EINVAL;
+		return addr;
+	}
+
 	/* dont allow allocations above current base */
 	if (mm->free_area_cache > base)
 		mm->free_area_cache = base;
@@ -823,6 +837,13 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 	/* Paranoia, caller should have dealt with this */
 	BUG_ON((addr + len) < addr);
 
+	/* Handle MAP_FIXED */
+	if (flags & MAP_FIXED) {
+		if (prepare_hugepage_range(addr, len, pgoff))
+			return -EINVAL;
+		return addr;
+	}
+
 	if (test_thread_flag(TIF_32BIT)) {
 		curareas = current->mm->context.low_htlb_areas;
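
For context, a minimal userspace sketch (not part of the patch) of the two mmap() paths the hunks above affect: a MAP_FIXED request for a normal mapping goes through arch_get_unmapped_area()/arch_get_unmapped_area_topdown(), while a MAP_FIXED mapping of a hugetlbfs file goes through hugetlb_get_unmapped_area(). The hint address, the "/mnt/huge/file" hugetlbfs path, and the 16 MB huge page size are assumptions for illustration only.

/* Hypothetical test sketch; assumes a hugetlbfs mount at /mnt/huge. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	void *hint = (void *)0x40000000UL;	/* arbitrary example address */

	/* Normal anonymous mapping at a fixed address: with this patch the
	 * arch hook rejects it (EINVAL) if the range is reserved for huge
	 * pages, instead of searching for a different address. */
	void *p = mmap(hint, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
	if (p == MAP_FAILED)
		perror("mmap fixed");
	else
		munmap(p, 4096);

	/* Huge-page mapping at a fixed address: with this patch the hook
	 * validates the range via prepare_hugepage_range() and returns the
	 * caller's address rather than picking its own. */
	int fd = open("/mnt/huge/file", O_CREAT | O_RDWR, 0600);
	if (fd >= 0) {
		void *h = mmap(hint, 16 * 1024 * 1024, PROT_READ | PROT_WRITE,
			       MAP_SHARED | MAP_FIXED, fd, 0);
		if (h == MAP_FAILED)
			perror("mmap hugetlb fixed");
		else
			munmap(h, 16 * 1024 * 1024);
		close(fd);
	}
	return 0;
}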