From 5af60839909b8e3b28ca7cd7912fa0b23475617f Mon Sep 17 00:00:00 2001
From: Christoph Lameter
Date: Sun, 6 May 2007 14:49:56 -0700
Subject: slab allocators: Remove obsolete SLAB_MUST_HWCACHE_ALIGN

This patch was recently posted to lkml and acked by Pekka.

The flag SLAB_MUST_HWCACHE_ALIGN is

1. Never checked by SLAB at all.

2. A duplicate of SLAB_HWCACHE_ALIGN for SLUB.

3. Fulfills the role of SLAB_HWCACHE_ALIGN for SLOB.

The only remaining use is in sparc64 and ppc64, and their use there
reflects some earlier role that the slab flag once may have had.  If it
is specified, then SLAB_HWCACHE_ALIGN is also specified.

The flag is confusing, inconsistent and has no purpose.  Remove it.

Acked-by: Pekka Enberg
Signed-off-by: Christoph Lameter
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/powerpc/mm/hugetlbpage.c | 3 +--
 arch/powerpc/mm/init_64.c     | 3 +--
 2 files changed, 2 insertions(+), 4 deletions(-)

(limited to 'arch/powerpc/mm')

diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 8508f973d9c..c8814177b71 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -1057,8 +1057,7 @@ static int __init hugetlbpage_init(void)
 	huge_pgtable_cache = kmem_cache_create("hugepte_cache",
 					       HUGEPTE_TABLE_SIZE,
 					       HUGEPTE_TABLE_SIZE,
-					       SLAB_HWCACHE_ALIGN |
-					       SLAB_MUST_HWCACHE_ALIGN,
+					       SLAB_HWCACHE_ALIGN,
 					       zero_ctor, NULL);
 	if (! huge_pgtable_cache)
 		panic("hugetlbpage_init(): could not create hugepte cache\n");
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index d12a87ec5ae..5a7750147b7 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -183,8 +183,7 @@ void pgtable_cache_init(void)
 			"for size: %08x...\n", name, i, size);
 		pgtable_cache[i] = kmem_cache_create(name,
 						     size, size,
-						     SLAB_HWCACHE_ALIGN |
-						     SLAB_MUST_HWCACHE_ALIGN,
+						     SLAB_HWCACHE_ALIGN,
 						     zero_ctor,
 						     NULL);
 		if (! pgtable_cache[i])
--
cgit v1.2.3

From f0f3980b21508bd573eff1746d469436f50a903d Mon Sep 17 00:00:00 2001
From: Christoph Lameter
Date: Sun, 6 May 2007 14:49:58 -0700
Subject: slab allocators: remove multiple alignment specifications

It is not necessary to tell the slab allocators to align to a cacheline
if an explicit alignment was already specified.  It is rather confusing
to specify multiple alignments.

Make sure that the call sites only use one form of alignment.

Signed-off-by: Christoph Lameter
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/powerpc/mm/hugetlbpage.c | 2 +-
 arch/powerpc/mm/init_64.c     | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

(limited to 'arch/powerpc/mm')

diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index c8814177b71..d0ec887f05a 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -1057,7 +1057,7 @@ static int __init hugetlbpage_init(void)
 	huge_pgtable_cache = kmem_cache_create("hugepte_cache",
 					       HUGEPTE_TABLE_SIZE,
 					       HUGEPTE_TABLE_SIZE,
-					       SLAB_HWCACHE_ALIGN,
+					       0,
 					       zero_ctor, NULL);
 	if (! huge_pgtable_cache)
 		panic("hugetlbpage_init(): could not create hugepte cache\n");
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 5a7750147b7..4416d5140c5 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -183,7 +183,7 @@ void pgtable_cache_init(void)
 			"for size: %08x...\n", name, i, size);
 		pgtable_cache[i] = kmem_cache_create(name,
 						     size, size,
-						     SLAB_HWCACHE_ALIGN,
+						     0,
 						     zero_ctor,
 						     NULL);
 		if (! pgtable_cache[i])
--
cgit v1.2.3
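Taken together, the two slab patches above leave these call sites relying
only on the explicit alignment argument of kmem_cache_create().  The sketch
below shows the resulting call pattern.  It assumes the 2.6.21-era
six-argument kmem_cache_create() prototype that the diffs use (on current
kernels the constructor takes only a void * and there is no destructor
argument); the cache name, EXAMPLE_PGTABLE_SIZE and example_zero_ctor() are
illustrative stand-ins rather than code from the patches.

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Hypothetical object size, standing in for HUGEPTE_TABLE_SIZE. */
#define EXAMPLE_PGTABLE_SIZE	4096

/* Constructor in the style of the powerpc zero_ctor() seen in the diffs. */
static void example_zero_ctor(void *addr, struct kmem_cache *cache,
			      unsigned long flags)
{
	memset(addr, 0, EXAMPLE_PGTABLE_SIZE);
}

static struct kmem_cache *example_cache;

static int __init example_cache_init(void)
{
	/*
	 * The third argument is already an explicit alignment (equal to the
	 * object size, as in the powerpc callers).  Adding SLAB_HWCACHE_ALIGN
	 * on top of it would specify alignment twice, and the removed
	 * SLAB_MUST_HWCACHE_ALIGN had no consistent meaning across SLAB, SLUB
	 * and SLOB, so after the two patches the flags are simply 0.
	 */
	example_cache = kmem_cache_create("example_pgtable_cache",
					  EXAMPLE_PGTABLE_SIZE,
					  EXAMPLE_PGTABLE_SIZE,
					  0,
					  example_zero_ctor, NULL);
	if (!example_cache)
		return -ENOMEM;
	return 0;
}

static void __exit example_cache_exit(void)
{
	kmem_cache_destroy(example_cache);
}

module_init(example_cache_init);
module_exit(example_cache_exit);
MODULE_LICENSE("GPL");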
From d506a7725114aaddbf982fd18621b3e0e5c27f1b Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt
Date: Sun, 6 May 2007 14:50:02 -0700
Subject: get_unmapped_area handles MAP_FIXED on powerpc

The current get_unmapped_area code calls the f_ops->get_unmapped_area
or the arch one (via the mm) only when MAP_FIXED is not passed.  That
makes it impossible for archs to impose proper constraints on regions
of the virtual address space.  To work around that, get_unmapped_area()
then calls some hugetlbfs specific hacks.

This causes several problems, among others:

- It makes it impossible for a driver or filesystem to do the same
  thing that hugetlbfs does (for example, to allow a driver to use
  larger page sizes to map external hardware) if that requires applying
  a constraint on the addresses (constraining that mapping to certain
  regions and keeping other mappings out of those regions).

- Some archs like arm, mips, sparc, sparc64, sh and sh64 already want
  MAP_FIXED to be passed down in order to deal with aliasing issues.
  The code is there to handle it... but is never called.

This series of patches moves the logic to handle MAP_FIXED down to the
various arch/driver get_unmapped_area() implementations, and then
changes the generic code to always call them.  The hugetlbfs hacks then
disappear from the generic code.

Since I need to do some special 64K page mappings for SPEs on cell, I
need to work around the first problem at least.  I have further patches
implementing a "slices" layer that handles multiple page sizes through
slices of the address space for use by hugetlbfs, the SPE code, and
possibly others, but it requires this series of patches first.

There is still a potential (but not practical) issue due to the fact
that filesystems/drivers implementing g_u_a will effectively bypass all
arch checks.  This is not an issue in practice as the only
filesystems/drivers using that hook are doing so for arch specific
purposes in the first place.

There is also a problem with mremap that will completely bypass all
arch checks.  I'll try to address that separately; I'm not 100% certain
yet how, possibly by making it not work when the vma has a file whose
f_ops has a get_unmapped_area callback, and by making it use
is_hugepage_only_range() before expanding into a new area.

Also, I want to turn is_hugepage_only_range() into a more generic
is_normal_page_range(), as that's really what it will end up meaning
when used in stack grow, brk grow and mremap.

None of the above "issues", however, are introduced by this patch; they
are already there, so I think the patch can go in for 2.6.22.

This patch:

Handle MAP_FIXED in powerpc's arch_get_unmapped_area() in all 3
implementations of it.

Signed-off-by: Benjamin Herrenschmidt
Acked-by: William Irwin
Cc: Paul Mackerras
Cc: Richard Henderson
Cc: Ivan Kokshaysky
Cc: Russell King
Cc: David Howells
Cc: Andi Kleen
Cc: "Luck, Tony"
Cc: Kyle McMartin
Cc: Grant Grundler
Cc: Matthew Wilcox
Cc: "David S. Miller"
Miller" Cc: Adam Litke Cc: David Gibson Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/powerpc/mm/hugetlbpage.c | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) (limited to 'arch/powerpc/mm') diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index d0ec887f05a..1f07f70ac89 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c @@ -566,6 +566,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, if (len > TASK_SIZE) return -ENOMEM; + /* handle fixed mapping: prevent overlap with huge pages */ + if (flags & MAP_FIXED) { + if (is_hugepage_only_range(mm, addr, len)) + return -EINVAL; + return addr; + } + if (addr) { addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); @@ -641,6 +648,13 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, if (len > TASK_SIZE) return -ENOMEM; + /* handle fixed mapping: prevent overlap with huge pages */ + if (flags & MAP_FIXED) { + if (is_hugepage_only_range(mm, addr, len)) + return -EINVAL; + return addr; + } + /* dont allow allocations above current base */ if (mm->free_area_cache > base) mm->free_area_cache = base; @@ -823,6 +837,13 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, /* Paranoia, caller should have dealt with this */ BUG_ON((addr + len) < addr); + /* Handle MAP_FIXED */ + if (flags & MAP_FIXED) { + if (prepare_hugepage_range(addr, len, pgoff)) + return -EINVAL; + return addr; + } + if (test_thread_flag(TIF_32BIT)) { curareas = current->mm->context.low_htlb_areas; -- cgit v1.2.3