From 8fc63658681f32e6e29f6d1138de933d7272e0ec Mon Sep 17 00:00:00 2001
From: Heiko Carstens
Date: Wed, 30 Apr 2008 13:38:44 +0200
Subject: [S390] vmemmap: use clear_table to initialise page tables.

Always use clear_table to initialise page tables. The overlapping
memcpy is just a leftover of a previous version that wasn't fully
converted to clear_table.

Signed-off-by: Heiko Carstens
Signed-off-by: Martin Schwidefsky
---
 arch/s390/mm/vmem.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

(limited to 'arch/s390/mm/vmem.c')

diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 35d90a4720f..3ffc0211dc8 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -77,8 +77,7 @@ static inline pud_t *vmem_pud_alloc(void)
 	pud = vmem_alloc_pages(2);
 	if (!pud)
 		return NULL;
-	pud_val(*pud) = _REGION3_ENTRY_EMPTY;
-	memcpy(pud + 1, pud, (PTRS_PER_PUD - 1)*sizeof(pud_t));
+	clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
 #endif
 	return pud;
 }
@@ -91,7 +90,7 @@ static inline pmd_t *vmem_pmd_alloc(void)
 	pmd = vmem_alloc_pages(2);
 	if (!pmd)
 		return NULL;
-	clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE*4);
+	clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
 #endif
 	return pmd;
 }
--
cgit v1.2.3
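A note on the pattern this patch completes: clear_table() initialises a freshly allocated table by storing one "empty entry" value into every slot, and both vmem_pud_alloc() and vmem_pmd_alloc() hand it an order-2 allocation, i.e. four pages. The following is a minimal userspace sketch of that idea; PAGE_SIZE and the entry value are placeholders, and the real kernel uses _REGION3_ENTRY_EMPTY / _SEGMENT_ENTRY_EMPTY and an optimized clear_table() rather than this loop.

#include <stdio.h>
#include <stdlib.h>

/* Placeholder values for illustration only; not the real s390 bit
 * patterns. */
#define PAGE_SIZE 4096UL
#define REGION3_ENTRY_EMPTY 0x20UL

/* Userspace model of clear_table(): fill the whole table with one
 * repeated "invalid entry" value. */
static void clear_table(unsigned long *table, unsigned long entry,
			unsigned long len)
{
	unsigned long i;

	for (i = 0; i < len / sizeof(*table); i++)
		table[i] = entry;
}

int main(void)
{
	/* vmem_pud_alloc() takes an order-2 allocation: 4 pages, 16 KB. */
	unsigned long *pud = malloc(PAGE_SIZE * 4);

	if (!pud)
		return 1;
	clear_table(pud, REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
	printf("first entry %#lx, last entry %#lx\n",
	       pud[0], pud[PAGE_SIZE * 4 / sizeof(unsigned long) - 1]);
	free(pud);
	return 0;
}

The removed two-line variant depended on the overlapping memcpy() propagating the first entry forward through the rest of the table, behaviour that standard memcpy() does not guarantee; as the commit message says, it was simply a leftover.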
From 53492b1de46a7576170e865062ffcfc93bb5650b Mon Sep 17 00:00:00 2001
From: Gerald Schaefer
Date: Wed, 30 Apr 2008 13:38:46 +0200
Subject: [S390] System z large page support.

This adds hugetlbfs support on System z, using both hardware large page
support if available and software large page emulation on older
hardware. Shared (large) page tables are implemented in software
emulation mode, by using page->index of the first tail page from a
compound large page to store page table information.

Signed-off-by: Gerald Schaefer
Signed-off-by: Martin Schwidefsky
---
 arch/s390/mm/vmem.c | 55 ++++++++++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 48 insertions(+), 7 deletions(-)

(limited to 'arch/s390/mm/vmem.c')

diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 3ffc0211dc8..97bce6c9757 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -10,10 +10,12 @@
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/list.h>
+#include <linux/hugetlb.h>
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
 #include <asm/setup.h>
 #include <asm/tlbflush.h>
+#include <asm/sections.h>
 
 static DEFINE_MUTEX(vmem_mutex);
 
@@ -113,7 +115,7 @@ static pte_t __init_refok *vmem_pte_alloc(void)
 /*
  * Add a physical memory range to the 1:1 mapping.
  */
-static int vmem_add_range(unsigned long start, unsigned long size)
+static int vmem_add_range(unsigned long start, unsigned long size, int ro)
 {
 	unsigned long address;
 	pgd_t *pg_dir;
@@ -140,7 +142,19 @@ static int vmem_add_range(unsigned long start, unsigned long size)
 			pud_populate_kernel(&init_mm, pu_dir, pm_dir);
 		}
 
+		pte = mk_pte_phys(address, __pgprot(ro ? _PAGE_RO : 0));
 		pm_dir = pmd_offset(pu_dir, address);
+
+#ifdef __s390x__
+		if (MACHINE_HAS_HPAGE && !(address & ~HPAGE_MASK) &&
+		    (address + HPAGE_SIZE <= start + size) &&
+		    (address >= HPAGE_SIZE)) {
+			pte_val(pte) |= _SEGMENT_ENTRY_LARGE;
+			pmd_val(*pm_dir) = pte_val(pte);
+			address += HPAGE_SIZE - PAGE_SIZE;
+			continue;
+		}
+#endif
 		if (pmd_none(*pm_dir)) {
 			pt_dir = vmem_pte_alloc();
 			if (!pt_dir)
@@ -149,7 +163,6 @@ static int vmem_add_range(unsigned long start, unsigned long size)
 		}
 
 		pt_dir = pte_offset_kernel(pm_dir, address);
-		pte = pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL);
 		*pt_dir = pte;
 	}
 	ret = 0;
@@ -180,6 +193,13 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
 		pm_dir = pmd_offset(pu_dir, address);
 		if (pmd_none(*pm_dir))
 			continue;
+
+		if (pmd_huge(*pm_dir)) {
+			pmd_clear_kernel(pm_dir);
+			address += HPAGE_SIZE - PAGE_SIZE;
+			continue;
+		}
+
 		pt_dir = pte_offset_kernel(pm_dir, address);
 		*pt_dir = pte;
 	}
@@ -248,14 +268,14 @@ out:
 	return ret;
 }
 
-static int vmem_add_mem(unsigned long start, unsigned long size)
+static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
 {
 	int ret;
 
 	ret = vmem_add_mem_map(start, size);
 	if (ret)
 		return ret;
-	return vmem_add_range(start, size);
+	return vmem_add_range(start, size, ro);
 }
 
 /*
@@ -338,7 +358,7 @@ int add_shared_memory(unsigned long start, unsigned long size)
 	if (ret)
 		goto out_free;
 
-	ret = vmem_add_mem(start, size);
+	ret = vmem_add_mem(start, size, 0);
 	if (ret)
 		goto out_remove;
 
@@ -374,14 +394,35 @@ out:
  */
 void __init vmem_map_init(void)
 {
+	unsigned long ro_start, ro_end;
+	unsigned long start, end;
 	int i;
 
 	INIT_LIST_HEAD(&init_mm.context.crst_list);
 	INIT_LIST_HEAD(&init_mm.context.pgtable_list);
 	init_mm.context.noexec = 0;
 	NODE_DATA(0)->node_mem_map = VMEM_MAP;
-	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++)
-		vmem_add_mem(memory_chunk[i].addr, memory_chunk[i].size);
+	ro_start = ((unsigned long)&_stext) & PAGE_MASK;
+	ro_end = PFN_ALIGN((unsigned long)&_eshared);
+	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
+		start = memory_chunk[i].addr;
+		end = memory_chunk[i].addr + memory_chunk[i].size;
+		if (start >= ro_end || end <= ro_start)
+			vmem_add_mem(start, end - start, 0);
+		else if (start >= ro_start && end <= ro_end)
+			vmem_add_mem(start, end - start, 1);
+		else if (start >= ro_start) {
+			vmem_add_mem(start, ro_end - start, 1);
+			vmem_add_mem(ro_end, end - ro_end, 0);
+		} else if (end < ro_end) {
+			vmem_add_mem(start, ro_start - start, 0);
+			vmem_add_mem(ro_start, end - ro_start, 1);
+		} else {
+			vmem_add_mem(start, ro_start - start, 0);
+			vmem_add_mem(ro_start, ro_end - ro_start, 1);
+			vmem_add_mem(ro_end, end - ro_end, 0);
+		}
+	}
 }
 
 /*
--
cgit v1.2.3
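The heart of this change is the per-address decision in vmem_add_range(): map a whole segment with a large page where alignment and range allow it, otherwise fall back to 4 KB pages. Below is a userspace sketch of that decision, assuming the 1 MB HPAGE_SIZE that s390 used at the time; the MACHINE_HAS_HPAGE facility check from the real code is omitted because it has no userspace counterpart.

#include <stdio.h>

/* Assumed geometry: 4 KB base pages, 1 MB "huge" segments. */
#define PAGE_SIZE	4096UL
#define HPAGE_SIZE	(1UL << 20)
#define HPAGE_MASK	(~(HPAGE_SIZE - 1))

/* Model of the test the patched vmem_add_range() applies at each
 * address: the address must be segment aligned, a full segment must
 * still fit inside the range being mapped, and segment zero is always
 * mapped with 4 KB pages. */
static int use_large_page(unsigned long address, unsigned long start,
			  unsigned long size)
{
	return !(address & ~HPAGE_MASK) &&
	       address + HPAGE_SIZE <= start + size &&
	       address >= HPAGE_SIZE;
}

int main(void)
{
	unsigned long start = 0, size = 3UL << 20; /* map 3 MB from 0 */
	unsigned long address;

	for (address = start; address < start + size;
	     address += use_large_page(address, start, size) ?
			HPAGE_SIZE : PAGE_SIZE) {
		if (!(address & ~HPAGE_MASK))	/* one line per segment */
			printf("%#9lx: %s\n", address,
			       use_large_page(address, start, size) ?
			       "1 MB segment" : "4 KB pages");
	}
	return 0;
}

In the kernel loop the large-page case adds HPAGE_SIZE - PAGE_SIZE and then continues, letting the for loop's own PAGE_SIZE increment complete the step; the sketch folds that into the increment expression.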
From 17f345808563d2f425b2b15d60c4a5b00112e9eb Mon Sep 17 00:00:00 2001
From: Heiko Carstens
Date: Wed, 30 Apr 2008 13:38:47 +0200
Subject: [S390] Convert to SPARSEMEM & SPARSEMEM_VMEMMAP

Convert s390 to SPARSEMEM and SPARSEMEM_VMEMMAP. We do a select
of SPARSEMEM_VMEMMAP since it is configurable. This is because
SPARSEMEM without SPARSEMEM_VMEMMAP gives us a hell of broken
include dependencies that I don't want to fix.

Signed-off-by: Heiko Carstens
Signed-off-by: Martin Schwidefsky
---
 arch/s390/mm/vmem.c | 81 ++++------------------------------------------------
 1 file changed, 6 insertions(+), 75 deletions(-)

(limited to 'arch/s390/mm/vmem.c')

diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 97bce6c9757..beccacf907f 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -27,43 +27,6 @@ struct memory_segment {
 
 static LIST_HEAD(mem_segs);
 
-void __meminit memmap_init(unsigned long size, int nid, unsigned long zone,
-			   unsigned long start_pfn)
-{
-	struct page *start, *end;
-	struct page *map_start, *map_end;
-	int i;
-
-	start = pfn_to_page(start_pfn);
-	end = start + size;
-
-	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
-		unsigned long cstart, cend;
-
-		cstart = PFN_DOWN(memory_chunk[i].addr);
-		cend = cstart + PFN_DOWN(memory_chunk[i].size);
-
-		map_start = mem_map + cstart;
-		map_end = mem_map + cend;
-
-		if (map_start < start)
-			map_start = start;
-		if (map_end > end)
-			map_end = end;
-
-		map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1))
-			/ sizeof(struct page);
-		map_end += ((PFN_ALIGN((unsigned long) map_end)
-			     - (unsigned long) map_end)
-			    / sizeof(struct page));
-
-		if (map_start < map_end)
-			memmap_init_zone((unsigned long)(map_end - map_start),
-					 nid, zone, page_to_pfn(map_start),
-					 MEMMAP_EARLY);
-	}
-}
-
 static void __ref *vmem_alloc_pages(unsigned int order)
 {
 	if (slab_is_available())
@@ -115,7 +78,7 @@ static pte_t __init_refok *vmem_pte_alloc(void)
 /*
  * Add a physical memory range to the 1:1 mapping.
  */
-static int vmem_add_range(unsigned long start, unsigned long size, int ro)
+static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
 {
 	unsigned long address;
 	pgd_t *pg_dir;
@@ -209,10 +172,9 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
 /*
  * Add a backed mem_map array to the virtual mem_map array.
  */
-static int vmem_add_mem_map(unsigned long start, unsigned long size)
+int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
 {
 	unsigned long address, start_addr, end_addr;
-	struct page *map_start, *map_end;
 	pgd_t *pg_dir;
 	pud_t *pu_dir;
 	pmd_t *pm_dir;
@@ -220,11 +182,8 @@ static int vmem_add_mem_map(unsigned long start, unsigned long size)
 	pte_t *pt_dir;
 	pte_t pte;
 	int ret = -ENOMEM;
 
-	map_start = VMEM_MAP + PFN_DOWN(start);
-	map_end = VMEM_MAP + PFN_DOWN(start + size);
-
-	start_addr = (unsigned long) map_start & PAGE_MASK;
-	end_addr = PFN_ALIGN((unsigned long) map_end);
+	start_addr = (unsigned long) start;
+	end_addr = (unsigned long) (start + nr);
 
 	for (address = start_addr; address < end_addr; address += PAGE_SIZE) {
 		pg_dir = pgd_offset_k(address);
@@ -268,16 +227,6 @@ out:
 	return ret;
 }
 
-static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
-{
-	int ret;
-
-	ret = vmem_add_mem_map(start, size);
-	if (ret)
-		return ret;
-	return vmem_add_range(start, size, ro);
-}
-
 /*
  * Add memory segment to the segment list if it doesn't overlap with
  * an already present segment.
@@ -315,7 +264,7 @@ static void __remove_shared_memory(struct memory_segment *seg)
 	vmem_remove_range(seg->start, seg->size);
 }
 
-int remove_shared_memory(unsigned long start, unsigned long size)
+int vmem_remove_mapping(unsigned long start, unsigned long size)
 {
 	struct memory_segment *seg;
 	int ret;
@@ -339,11 +288,9 @@ out:
 	return ret;
 }
 
-int add_shared_memory(unsigned long start, unsigned long size)
+int vmem_add_mapping(unsigned long start, unsigned long size)
 {
 	struct memory_segment *seg;
-	struct page *page;
-	unsigned long pfn, num_pfn, end_pfn;
 	int ret;
 
 	mutex_lock(&vmem_mutex);
@@ -361,21 +308,6 @@ int add_shared_memory(unsigned long start, unsigned long size)
 	ret = vmem_add_mem(start, size, 0);
 	if (ret)
 		goto out_remove;
-
-	pfn = PFN_DOWN(start);
-	num_pfn = PFN_DOWN(size);
-	end_pfn = pfn + num_pfn;
-
-	page = pfn_to_page(pfn);
-	memset(page, 0, num_pfn * sizeof(struct page));
-
-	for (; pfn < end_pfn; pfn++) {
-		page = pfn_to_page(pfn);
-		init_page_count(page);
-		reset_page_mapcount(page);
-		SetPageReserved(page);
-		INIT_LIST_HEAD(&page->lru);
-	}
 	goto out;
 
 out_remove:
@@ -401,7 +333,6 @@ void __init vmem_map_init(void)
 	INIT_LIST_HEAD(&init_mm.context.crst_list);
 	INIT_LIST_HEAD(&init_mm.context.pgtable_list);
 	init_mm.context.noexec = 0;
-	NODE_DATA(0)->node_mem_map = VMEM_MAP;
 	ro_start = ((unsigned long)&_stext) & PAGE_MASK;
 	ro_end = PFN_ALIGN((unsigned long)&_eshared);
 	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
--
cgit v1.2.3
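The payoff of the conversion is visible in what this patch deletes: with SPARSEMEM_VMEMMAP the struct page array lives at a single virtual base, so pfn/page conversion becomes plain pointer arithmetic, and vmemmap_populate() only has to back the slices of the array that cover present memory. Below is a toy model of that arithmetic; the base address and the one-field struct page are assumptions for illustration, and a 64-bit build is assumed.

#include <stdio.h>

/* Illustrative stand-ins; the real vmemmap base and struct page layout
 * are architecture specific. */
struct page { unsigned long flags; };

#define VMEMMAP ((struct page *)0x4000000000UL) /* assumed virtual base */

static inline struct page *pfn_to_page(unsigned long pfn)
{
	return VMEMMAP + pfn;	/* no per-chunk lookup, unlike the
				 * memmap_init() this patch removes */
}

static inline unsigned long page_to_pfn(const struct page *page)
{
	return (unsigned long)(page - VMEMMAP);
}

int main(void)
{
	/* Only addresses are computed; nothing here is dereferenced. */
	const struct page *p = pfn_to_page(0x1234);

	printf("pfn 0x1234 -> struct page at %p -> pfn %#lx\n",
	       (const void *)p, page_to_pfn(p));
	return 0;
}

That same property is presumably why the hand-rolled struct page initialisation in add_shared_memory() could be dropped: once vmemmap_populate() has backed the array, the generic SPARSEMEM code owns initialising the entries.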