author    Paul Mundt <lethal@linux-sh.org>  2009-09-03 17:21:10 +0900
committer Paul Mundt <lethal@linux-sh.org>  2009-09-03 17:21:10 +0900
commit    0906a3ad33a254094fb74828e3ddb9af8771a6da
tree      33acc1be2e213ae2f13439d3d5f8e9dd8a4f2d46
parent    d1af119a69fc9a625bd57a66d9c9fa88795b082c
sh: Fix up and optimize the kmap_coherent() interface.
This fixes up the kmap_coherent/kunmap_coherent() interface for recent
changes both in the page fault path and the shared cache flushers, as
well as adding in some optimizations. One of the key things to note
here is that the TLB flush itself is deferred until the unmap, and the
call in to update_mmu_cache() itself goes away, relying on the regular
page fault path to handle the lazy dcache writeback if necessary.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
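As an illustration of the interface the message describes, here is a minimal
sketch of a typical caller (an assumed shape, not code from this patch; it
relies on the sh kmap_coherent()/kunmap_coherent() pair, with the unmap side
taking the mapped address as the message implies):

	#include <linux/mm.h>		/* struct page */
	#include <linux/string.h>	/* memcpy */
	/* kmap_coherent()/kunmap_coherent() come from the sh arch headers. */

	/*
	 * Sketch only: map the page at a kernel address that shares the
	 * dcache alias of 'uaddr', copy through it, then unmap.  Per the
	 * commit message, the TLB entry for the temporary mapping is
	 * flushed at kunmap_coherent() time rather than on each map.
	 */
	static void sketch_copy_to_page(struct page *page, unsigned long uaddr,
					const void *src, size_t len)
	{
		void *vto = kmap_coherent(page, uaddr);	/* alias-matched mapping */

		memcpy(vto, src, len);
		kunmap_coherent(vto);	/* deferred TLB flush happens here */
	}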
Diffstat (limited to 'arch/sh/mm/init.c')
 arch/sh/mm/init.c | 45 +++++++++++++++++++++++++--------------------
 1 file changed, 25 insertions(+), 20 deletions(-)
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 0a9b4d855bc..edc842ff61e 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -106,27 +106,31 @@ void __init page_table_range_init(unsigned long start, unsigned long end,
 	pgd_t *pgd;
 	pud_t *pud;
 	pmd_t *pmd;
-	int pgd_idx;
+	pte_t *pte;
+	int i, j, k;
 	unsigned long vaddr;
 
-	vaddr = start & PMD_MASK;
-	end = (end + PMD_SIZE - 1) & PMD_MASK;
-	pgd_idx = pgd_index(vaddr);
-	pgd = pgd_base + pgd_idx;
-
-	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
-		BUG_ON(pgd_none(*pgd));
-		pud = pud_offset(pgd, 0);
-		BUG_ON(pud_none(*pud));
-		pmd = pmd_offset(pud, 0);
-
-		if (!pmd_present(*pmd)) {
-			pte_t *pte_table;
-			pte_table = (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
-			pmd_populate_kernel(&init_mm, pmd, pte_table);
+	vaddr = start;
+	i = __pgd_offset(vaddr);
+	j = __pud_offset(vaddr);
+	k = __pmd_offset(vaddr);
+	pgd = pgd_base + i;
+
+	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
+		pud = (pud_t *)pgd;
+		for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
+			pmd = (pmd_t *)pud;
+			for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
+				if (pmd_none(*pmd)) {
+					pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
+					pmd_populate_kernel(&init_mm, pmd, pte);
+					BUG_ON(pte != pte_offset_kernel(pmd, 0));
+				}
+				vaddr += PMD_SIZE;
+			}
+			k = 0;
 		}
-
-		vaddr += PMD_SIZE;
+		j = 0;
 	}
 }
 #endif /* CONFIG_MMU */
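For reference, the starting indices of the new walk reduce to the usual
shift-and-mask decomposition of the virtual address (a generic sketch, not
part of the patch; on sh configurations with folded pud/pmd levels,
PTRS_PER_PUD and PTRS_PER_PMD are 1, so the (pud_t *)pgd and (pmd_t *)pud
casts reuse the same entry and the inner loops run once per pgd slot):

	/* Sketch of what the patch's starting indices compute. */
	i = (vaddr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1);	/* __pgd_offset() */
	j = (vaddr >> PUD_SHIFT) & (PTRS_PER_PUD - 1);		/* __pud_offset() */
	k = (vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1);		/* __pmd_offset() */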
@@ -137,7 +141,7 @@ void __init page_table_range_init(unsigned long start, unsigned long end,
 void __init paging_init(void)
 {
 	unsigned long max_zone_pfns[MAX_NR_ZONES];
-	unsigned long vaddr;
+	unsigned long vaddr, end;
 	int nid;
 
 	/* We don't need to map the kernel through the TLB, as
@@ -155,7 +159,8 @@ void __init paging_init(void)
 	 * pte's will be filled in by __set_fixmap().
 	 */
 	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
-	page_table_range_init(vaddr, 0, swapper_pg_dir);
+	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
+	page_table_range_init(vaddr, end, swapper_pg_dir);
 
 	kmap_coherent_init();
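The new end computation is the standard power-of-two align-up idiom, shown in
isolation below (an illustrative macro, not from the patch; PMD_MASK is
~(PMD_SIZE - 1)):

	/*
	 * Round up to the next pmd boundary, e.g. with PMD_SIZE == 0x400000,
	 * 0xbf401000 becomes 0xbf800000, while an already-aligned address is
	 * unchanged.  This guarantees page_table_range_init() sees a range
	 * covered by whole pmds up to FIXADDR_TOP.
	 */
	#define PMD_ALIGN_UP(x)	(((x) + PMD_SIZE - 1) & PMD_MASK)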