From 2b688dfd0a93cf3b17c38feef693361da47b0606 Mon Sep 17 00:00:00 2001
From: Pekka Enberg
Date: Tue, 3 Mar 2009 12:55:04 +0200
Subject: x86: move __VMALLOC_RESERVE to pgtable_32.c

Impact: cleanup

The __VMALLOC_RESERVE global variable is not used in init_32.c. Move
that to pgtable_32.c to reduce the diff between init_32.c and init_64.c.

Signed-off-by: Pekka Enberg
LKML-Reference: <1236077704.2675.4.camel@penberg-laptop>
Signed-off-by: Ingo Molnar
---
 arch/x86/mm/init_32.c | 2 --
 1 file changed, 2 deletions(-)

(limited to 'arch/x86/mm/init_32.c')

diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 06708ee94aa..5b06a2f5dea 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -50,8 +50,6 @@
 #include
 #include

-unsigned int __VMALLOC_RESERVE = 128 << 20;
-
 unsigned long max_low_pfn_mapped;
 unsigned long max_pfn_mapped;
--
cgit v1.2.3

From fd578f9c0a0a7bf3e460e6f21cdc6f4018949e80 Mon Sep 17 00:00:00 2001
From: Pekka Enberg
Date: Tue, 3 Mar 2009 12:55:05 +0200
Subject: x86: use roundup() instead of PAGE_ALIGN() in find_early_table_space()

Impact: cleanup

This patch changes find_early_table_space() to use roundup() for
rounding up tables to page size to unify the common parts of the
32-bit and 64-bit implementations.

Signed-off-by: Pekka Enberg
LKML-Reference: <1236077705.2675.6.camel@penberg-laptop>
Signed-off-by: Ingo Molnar
---
 arch/x86/mm/init_32.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

(limited to 'arch/x86/mm/init_32.c')

diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 5b06a2f5dea..1dd6b6334dc 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -845,10 +845,10 @@ static void __init find_early_table_space(unsigned long end, int use_pse)
        unsigned long puds, pmds, ptes, tables, start;

        puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
-       tables = PAGE_ALIGN(puds * sizeof(pud_t));
+       tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);

        pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
-       tables += PAGE_ALIGN(pmds * sizeof(pmd_t));
+       tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);

        if (use_pse) {
                unsigned long extra;
@@ -859,10 +859,10 @@ static void __init find_early_table_space(unsigned long end, int use_pse)
        } else
                ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;

-       tables += PAGE_ALIGN(ptes * sizeof(pte_t));
+       tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE);

        /* for fixmap */
-       tables += PAGE_ALIGN(__end_of_fixed_addresses * sizeof(pte_t));
+       tables += roundup(__end_of_fixed_addresses * sizeof(pte_t), PAGE_SIZE);

        /*
         * RED-PEN putting page tables only on node 0 could
--
cgit v1.2.3
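
The roundup()/PAGE_ALIGN() substitution above is behavior-neutral: for a
power-of-two alignment such as PAGE_SIZE, both expressions round a byte
count up to the next page boundary. A minimal user-space sketch, with the
two macros re-implemented locally only for illustration and a 4096-byte
PAGE_SIZE assumed (this is not kernel code):

        #include <assert.h>
        #include <stdio.h>

        #define PAGE_SIZE 4096UL
        /* mirrors the kernel's roundup(): round x up to a multiple of y */
        #define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
        /* mirrors PAGE_ALIGN(): mask-based rounding up to a page boundary */
        #define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

        int main(void)
        {
                unsigned long sizes[] = { 0, 1, 4095, 4096, 4097, 3 * 4096 + 17 };
                unsigned int i;

                for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
                        /* the two forms agree for every table size */
                        assert(roundup(sizes[i], PAGE_SIZE) == PAGE_ALIGN(sizes[i]));
                        printf("%5lu -> %lu\n", sizes[i],
                               roundup(sizes[i], PAGE_SIZE));
                }
                return 0;
        }

The change therefore only brings the 32-bit find_early_table_space()
textually in line with the 64-bit version.
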
From 05f209e7b936a48e341d36831079116a06658ccc Mon Sep 17 00:00:00 2001
From: Pekka Enberg
Date: Tue, 3 Mar 2009 13:15:02 +0200
Subject: x86: add sanity checks to init_32.c

Impact: unification

This patch adds sanity checks that are already in init_64.c to init_32.c.

Signed-off-by: Pekka Enberg
LKML-Reference: <1236078902.2675.16.camel@penberg-laptop>
Signed-off-by: Ingo Molnar
---
 arch/x86/mm/init_32.c | 17 +++++++++++------
 1 file changed, 11 insertions(+), 6 deletions(-)

(limited to 'arch/x86/mm/init_32.c')

diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 1dd6b6334dc..1570a822c18 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -1214,18 +1214,21 @@ void mark_rodata_ro(void)

 void free_init_pages(char *what, unsigned long begin, unsigned long end)
 {
-#ifdef CONFIG_DEBUG_PAGEALLOC
+       unsigned long addr = begin;
+
+       if (addr >= end)
+               return;
+
        /*
         * If debugging page accesses then do not free this memory but
         * mark them not present - any buggy init-section access will
         * create a kernel page fault:
         */
+#ifdef CONFIG_DEBUG_PAGEALLOC
        printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
                begin, PAGE_ALIGN(end));
        set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
 #else
-       unsigned long addr;
-
        /*
         * We just marked the kernel text read only above, now that
         * we are going to free part of that, we need to make that
@@ -1233,14 +1236,16 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
         */
        set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);

-       for (addr = begin; addr < end; addr += PAGE_SIZE) {
+       printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
+
+       for (; addr < end; addr += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(addr));
                init_page_count(virt_to_page(addr));
-               memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
+               memset((void *)(addr & ~(PAGE_SIZE-1)),
+                       POISON_FREE_INITMEM, PAGE_SIZE);
                free_page(addr);
                totalram_pages++;
        }
-       printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
 #endif
 }
--
cgit v1.2.3
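
The reworked memset() above masks the address down to its page boundary
with addr & ~(PAGE_SIZE-1) before poisoning the page. A small stand-alone
sketch of that mask arithmetic, with a 4096-byte PAGE_SIZE assumed
(illustrative only, not kernel code):

        #include <stdio.h>

        #define PAGE_SIZE 4096UL

        int main(void)
        {
                unsigned long addr = 0xc0123456UL;      /* arbitrary example address */

                /* clearing the low bits rounds DOWN to the start of the page */
                printf("addr       = %#lx\n", addr);
                printf("align down = %#lx\n", addr & ~(PAGE_SIZE - 1));
                /* adding PAGE_SIZE-1 first rounds UP instead (PAGE_ALIGN style) */
                printf("align up   = %#lx\n",
                       (addr + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1));
                return 0;
        }

Here "align down" prints 0xc0123000 and "align up" prints 0xc0124000.
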
From e5b2bb552706ca0e30795ee84caacbb37cec5705 Mon Sep 17 00:00:00 2001
From: Pekka Enberg
Date: Tue, 3 Mar 2009 13:15:06 +0200
Subject: x86: unify free_init_pages() and free_initmem()

Impact: unification

This patch introduces a common arch/x86/mm/init.c and moves the
identical free_init_pages() and free_initmem() functions to the file.

Signed-off-by: Pekka Enberg
LKML-Reference: <1236078906.2675.18.camel@penberg-laptop>
Signed-off-by: Ingo Molnar
---
 arch/x86/mm/init_32.c | 44 --------------------------------------------
 1 file changed, 44 deletions(-)

(limited to 'arch/x86/mm/init_32.c')

diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 1570a822c18..cd8d6732613 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -1212,50 +1212,6 @@ void mark_rodata_ro(void)
 }
 #endif

-void free_init_pages(char *what, unsigned long begin, unsigned long end)
-{
-       unsigned long addr = begin;
-
-       if (addr >= end)
-               return;
-
-       /*
-        * If debugging page accesses then do not free this memory but
-        * mark them not present - any buggy init-section access will
-        * create a kernel page fault:
-        */
-#ifdef CONFIG_DEBUG_PAGEALLOC
-       printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
-               begin, PAGE_ALIGN(end));
-       set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
-#else
-       /*
-        * We just marked the kernel text read only above, now that
-        * we are going to free part of that, we need to make that
-        * writeable first.
-        */
-       set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);
-
-       printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
-
-       for (; addr < end; addr += PAGE_SIZE) {
-               ClearPageReserved(virt_to_page(addr));
-               init_page_count(virt_to_page(addr));
-               memset((void *)(addr & ~(PAGE_SIZE-1)),
-                       POISON_FREE_INITMEM, PAGE_SIZE);
-               free_page(addr);
-               totalram_pages++;
-       }
-#endif
-}
-
-void free_initmem(void)
-{
-       free_init_pages("unused kernel memory",
-               (unsigned long)(&__init_begin),
-               (unsigned long)(&__init_end));
-}
-
 #ifdef CONFIG_BLK_DEV_INITRD
 void free_initrd_mem(unsigned long start, unsigned long end)
 {
--
cgit v1.2.3

From 867c5b5292583b1e474cbbcb4c77f09bfca3903c Mon Sep 17 00:00:00 2001
From: Pekka Enberg
Date: Tue, 3 Mar 2009 14:10:12 +0200
Subject: x86: set_highmem_pages_init() cleanup

Impact: cleanup

This patch moves set_highmem_pages_init() to arch/x86/mm/highmem_32.c.
The declaration of the function is kept in asm/numa_32.h because
asm/highmem.h is included only if CONFIG_HIGHMEM is enabled so we can't
put the empty static inline function there.

Signed-off-by: Pekka Enberg
LKML-Reference: <1236082212.2675.24.camel@penberg-laptop>
Signed-off-by: Ingo Molnar
---
 arch/x86/mm/init_32.c | 12 ------------
 1 file changed, 12 deletions(-)

(limited to 'arch/x86/mm/init_32.c')

diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index cd8d6732613..0b087dcd2c1 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -467,22 +467,10 @@ void __init add_highpages_with_active_regions(int nid, unsigned long start_pfn,
        work_with_active_regions(nid, add_highpages_work_fn, &data);
 }

-#ifndef CONFIG_NUMA
-static void __init set_highmem_pages_init(void)
-{
-       add_highpages_with_active_regions(0, highstart_pfn, highend_pfn);
-
-       totalram_pages += totalhigh_pages;
-}
-#endif /* !CONFIG_NUMA */
-
 #else
 static inline void permanent_kmaps_init(pgd_t *pgd_base)
 {
 }
-static inline void set_highmem_pages_init(void)
-{
-}
 #endif /* CONFIG_HIGHMEM */

 void __init native_pagetable_setup_start(pgd_t *base)
--
cgit v1.2.3
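
The set_highmem_pages_init() cleanup above relies on the usual kernel
idiom of pairing a real implementation with an empty static inline stub
so that callers never need an #ifdef; the commit only moves where those
definitions live. A simplified stand-alone sketch of that idiom (the
config switch is modelled with a plain #define purely for illustration;
this is not the actual kernel header layout):

        #include <stdio.h>

        #define CONFIG_HIGHMEM 1        /* set to 0 to model a !CONFIG_HIGHMEM build */

        #if CONFIG_HIGHMEM
        static void set_highmem_pages_init(void)
        {
                /* stand-in for the real work now living in highmem_32.c */
                printf("registering highmem pages\n");
        }
        #else
        static inline void set_highmem_pages_init(void)
        {
                /* empty stub: compiles away when highmem is not configured */
        }
        #endif

        int main(void)
        {
                /* the call site stays free of #ifdefs either way */
                set_highmem_pages_init();
                return 0;
        }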