From 2277ab4a1df50e05bc732fe9488d4e902bb8399a Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Wed, 22 Jul 2009 19:20:49 +0900
Subject: sh: Migrate from PG_mapped to PG_dcache_dirty.

This inverts the delayed dcache flush a bit to be more in line with
other platforms. At the same time this also gives us the ability to do
some more optimizations and cleanup. Now that the update_mmu_cache()
callsite only tests for the bit, the implementation can gradually be
split out and made generic, rather than relying on special
implementations for each of the peculiar CPU types.

SH7705 in 32kB mode and SH-4 still need slightly different handling,
but this is something that can remain isolated in the varying page
copy/clear routines. On top of that, SH-X3 is dcache coherent, so
there is no need to bother with any of these tests in the PTEAEX
version of update_mmu_cache(), so we kill that off too.

Signed-off-by: Paul Mundt
---
 arch/sh/mm/pg-sh4.c | 74 +++++++++++++++++++++--------------------------------
 1 file changed, 29 insertions(+), 45 deletions(-)

(limited to 'arch/sh/mm/pg-sh4.c')

diff --git a/arch/sh/mm/pg-sh4.c b/arch/sh/mm/pg-sh4.c
index 2fe14da1f83..f3c4b2a54fc 100644
--- a/arch/sh/mm/pg-sh4.c
+++ b/arch/sh/mm/pg-sh4.c
@@ -15,8 +15,6 @@
 #include
 #include
 
-#define CACHE_ALIAS (current_cpu_data.dcache.alias_mask)
-
 #define kmap_get_fixmap_pte(vaddr) \
 	pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr))
 
@@ -68,10 +66,9 @@ static inline void kunmap_coherent(struct page *page)
  */
 void clear_user_page(void *to, unsigned long address, struct page *page)
 {
-	__set_bit(PG_mapped, &page->flags);
-
 	clear_page(to);
-	if ((((address & PAGE_MASK) ^ (unsigned long)to) & CACHE_ALIAS))
+
+	if (pages_do_alias((unsigned long)to, address & PAGE_MASK))
 		__flush_wback_region(to, PAGE_SIZE);
 }
 
@@ -79,13 +76,14 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
 		       unsigned long vaddr, void *dst, const void *src,
 		       unsigned long len)
 {
-	void *vto;
-
-	__set_bit(PG_mapped, &page->flags);
-
-	vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
-	memcpy(vto, src, len);
-	kunmap_coherent(vto);
+	if (page_mapped(page) && !test_bit(PG_dcache_dirty, &page->flags)) {
+		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
+		memcpy(vto, src, len);
+		kunmap_coherent(vto);
+	} else {
+		memcpy(dst, src, len);
+		set_bit(PG_dcache_dirty, &page->flags);
+	}
 
 	if (vma->vm_flags & VM_EXEC)
 		flush_cache_page(vma, vaddr, page_to_pfn(page));
@@ -95,13 +93,14 @@ void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
 			 unsigned long vaddr, void *dst, const void *src,
 			 unsigned long len)
 {
-	void *vfrom;
-
-	__set_bit(PG_mapped, &page->flags);
-
-	vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
-	memcpy(dst, vfrom, len);
-	kunmap_coherent(vfrom);
+	if (page_mapped(page) && !test_bit(PG_dcache_dirty, &page->flags)) {
+		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
+		memcpy(dst, vfrom, len);
+		kunmap_coherent(vfrom);
+	} else {
+		memcpy(dst, src, len);
+		set_bit(PG_dcache_dirty, &page->flags);
+	}
 }
 
 void copy_user_highpage(struct page *to, struct page *from,
@@ -109,14 +108,19 @@ void copy_user_highpage(struct page *to, struct page *from,
 {
 	void *vfrom, *vto;
 
-	__set_bit(PG_mapped, &to->flags);
-
 	vto = kmap_atomic(to, KM_USER1);
 
-	vfrom = kmap_coherent(from, vaddr);
-	copy_page(vto, vfrom);
-	kunmap_coherent(vfrom);
-	if (((vaddr ^ (unsigned long)vto) & CACHE_ALIAS))
+	if (page_mapped(from) && !test_bit(PG_dcache_dirty, &from->flags)) {
+		vfrom = kmap_coherent(from, vaddr);
+		copy_page(vto, vfrom);
+		kunmap_coherent(vfrom);
+	} else {
+		vfrom = kmap_atomic(from, KM_USER0);
+		copy_page(vto, vfrom);
+		kunmap_atomic(vfrom, KM_USER0);
+	}
+
+	if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
 		__flush_wback_region(vto, PAGE_SIZE);
 
 	kunmap_atomic(vto, KM_USER1);
@@ -124,23 +128,3 @@ void copy_user_highpage(struct page *to, struct page *from,
 	smp_wmb();
 }
 EXPORT_SYMBOL(copy_user_highpage);
-
-/*
- * For SH-4, we have our own implementation for ptep_get_and_clear
- */
-pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
-{
-	pte_t pte = *ptep;
-
-	pte_clear(mm, addr, ptep);
-	if (!pte_not_present(pte)) {
-		unsigned long pfn = pte_pfn(pte);
-		if (pfn_valid(pfn)) {
-			struct page *page = pfn_to_page(pfn);
-			struct address_space *mapping = page_mapping(page);
-			if (!mapping || !mapping_writably_mapped(mapping))
-				__clear_bit(PG_mapped, &page->flags);
-		}
-	}
-	return pte;
-}
--
cgit v1.2.3

From dfff0fa65ab15db45acd64b3189787d37ab163cd Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Mon, 27 Jul 2009 20:53:22 +0900
Subject: sh: wire up clear_user_highpage() for sh4, convert sh7705.

This wires up clear_user_highpage() on SH-4 and subsequently converts
the SH7705 32kB cache mode over to using it. Now that the SH-4
implementation handles all of the dcache purging directly in the
aliasing case, there is no need to do this in the default clear_page()
implementation.

Signed-off-by: Paul Mundt
---
 arch/sh/mm/pg-sh4.c | 29 ++++++++++++++---------------
 1 file changed, 14 insertions(+), 15 deletions(-)

(limited to 'arch/sh/mm/pg-sh4.c')

diff --git a/arch/sh/mm/pg-sh4.c b/arch/sh/mm/pg-sh4.c
index f3c4b2a54fc..4d93070b822 100644
--- a/arch/sh/mm/pg-sh4.c
+++ b/arch/sh/mm/pg-sh4.c
@@ -2,7 +2,7 @@
  * arch/sh/mm/pg-sh4.c
  *
  * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
- * Copyright (C) 2002 - 2007  Paul Mundt
+ * Copyright (C) 2002 - 2009  Paul Mundt
  *
  * Released under the terms of the GNU GPL v2.0.
  */
@@ -58,20 +58,6 @@ static inline void kunmap_coherent(struct page *page)
 	preempt_check_resched();
 }
 
-/*
- * clear_user_page
- * @to: P1 address
- * @address: U0 address to be mapped
- * @page: page (virt_to_page(to))
- */
-void clear_user_page(void *to, unsigned long address, struct page *page)
-{
-	clear_page(to);
-
-	if (pages_do_alias((unsigned long)to, address & PAGE_MASK))
-		__flush_wback_region(to, PAGE_SIZE);
-}
-
 void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
 		       unsigned long vaddr, void *dst, const void *src,
 		       unsigned long len)
@@ -128,3 +114,16 @@ void copy_user_highpage(struct page *to, struct page *from,
 	smp_wmb();
 }
 EXPORT_SYMBOL(copy_user_highpage);
+
+void clear_user_highpage(struct page *page, unsigned long vaddr)
+{
+	void *kaddr = kmap_atomic(page, KM_USER0);
+
+	clear_page(kaddr);
+
+	if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK))
+		__flush_wback_region(kaddr, PAGE_SIZE);
+
+	kunmap_atomic(kaddr, KM_USER0);
+}
+EXPORT_SYMBOL(clear_user_highpage);
--
cgit v1.2.3

From 0dfae7d5a21901b28ec0452d71be64adf5ea323e Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Mon, 27 Jul 2009 21:30:17 +0900
Subject: sh: Use the now generic SH-4 clear/copy page ops for all MMU platforms.

Now that the SH-4 page clear/copy ops are generic, they can be used for
all platforms with CONFIG_MMU=y. SH-5 remains the odd one out, but it
too will gradually be converted over to using this interface.
SH-3 platforms which do not contain aliases will see no impact from
this change, while aliasing SH-3 platforms will get the same interface
as SH-4.

Signed-off-by: Paul Mundt
---
 arch/sh/mm/pg-sh4.c | 129 ----------------------------------------------------
 1 file changed, 129 deletions(-)
 delete mode 100644 arch/sh/mm/pg-sh4.c

(limited to 'arch/sh/mm/pg-sh4.c')

diff --git a/arch/sh/mm/pg-sh4.c b/arch/sh/mm/pg-sh4.c
deleted file mode 100644
index 4d93070b822..00000000000
--- a/arch/sh/mm/pg-sh4.c
+++ /dev/null
@@ -1,129 +0,0 @@
-/*
- * arch/sh/mm/pg-sh4.c
- *
- * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
- * Copyright (C) 2002 - 2009  Paul Mundt
- *
- * Released under the terms of the GNU GPL v2.0.
- */
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-#define kmap_get_fixmap_pte(vaddr) \
-	pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr))
-
-static pte_t *kmap_coherent_pte;
-
-void __init kmap_coherent_init(void)
-{
-	unsigned long vaddr;
-
-	/* cache the first coherent kmap pte */
-	vaddr = __fix_to_virt(FIX_CMAP_BEGIN);
-	kmap_coherent_pte = kmap_get_fixmap_pte(vaddr);
-}
-
-static inline void *kmap_coherent(struct page *page, unsigned long addr)
-{
-	enum fixed_addresses idx;
-	unsigned long vaddr, flags;
-	pte_t pte;
-
-	inc_preempt_count();
-
-	idx = (addr & current_cpu_data.dcache.alias_mask) >> PAGE_SHIFT;
-	vaddr = __fix_to_virt(FIX_CMAP_END - idx);
-	pte = mk_pte(page, PAGE_KERNEL);
-
-	local_irq_save(flags);
-	flush_tlb_one(get_asid(), vaddr);
-	local_irq_restore(flags);
-
-	update_mmu_cache(NULL, vaddr, pte);
-
-	set_pte(kmap_coherent_pte - (FIX_CMAP_END - idx), pte);
-
-	return (void *)vaddr;
-}
-
-static inline void kunmap_coherent(struct page *page)
-{
-	dec_preempt_count();
-	preempt_check_resched();
-}
-
-void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
-		       unsigned long vaddr, void *dst, const void *src,
-		       unsigned long len)
-{
-	if (page_mapped(page) && !test_bit(PG_dcache_dirty, &page->flags)) {
-		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
-		memcpy(vto, src, len);
-		kunmap_coherent(vto);
-	} else {
-		memcpy(dst, src, len);
-		set_bit(PG_dcache_dirty, &page->flags);
-	}
-
-	if (vma->vm_flags & VM_EXEC)
-		flush_cache_page(vma, vaddr, page_to_pfn(page));
-}
-
-void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
-			 unsigned long vaddr, void *dst, const void *src,
-			 unsigned long len)
-{
-	if (page_mapped(page) && !test_bit(PG_dcache_dirty, &page->flags)) {
-		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
-		memcpy(dst, vfrom, len);
-		kunmap_coherent(vfrom);
-	} else {
-		memcpy(dst, src, len);
-		set_bit(PG_dcache_dirty, &page->flags);
-	}
-}
-
-void copy_user_highpage(struct page *to, struct page *from,
-			unsigned long vaddr, struct vm_area_struct *vma)
-{
-	void *vfrom, *vto;
-
-	vto = kmap_atomic(to, KM_USER1);
-
-	if (page_mapped(from) && !test_bit(PG_dcache_dirty, &from->flags)) {
-		vfrom = kmap_coherent(from, vaddr);
-		copy_page(vto, vfrom);
-		kunmap_coherent(vfrom);
-	} else {
-		vfrom = kmap_atomic(from, KM_USER0);
-		copy_page(vto, vfrom);
-		kunmap_atomic(vfrom, KM_USER0);
-	}
-
-	if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
-		__flush_wback_region(vto, PAGE_SIZE);
-
-	kunmap_atomic(vto, KM_USER1);
-	/* Make sure this page is cleared on other CPU's too before using it */
-	smp_wmb();
-}
-EXPORT_SYMBOL(copy_user_highpage);
-
-void clear_user_highpage(struct page *page, unsigned long vaddr)
-{
-	void *kaddr = kmap_atomic(page, KM_USER0);
-
-	clear_page(kaddr);
-
-	if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK))
-		__flush_wback_region(kaddr, PAGE_SIZE);
-
-	kunmap_atomic(kaddr, KM_USER0);
-}
-EXPORT_SYMBOL(clear_user_highpage);
--
cgit v1.2.3
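
A note on the PG_dcache_dirty handshake: the first commit above points out
that the update_mmu_cache() callsite now only tests for the bit. The consumer
side of that handshake is not part of these patches; the fragment below is an
illustrative sketch only. The helper name flush_dcache_if_deferred() is
invented here, and the real update_mmu_cache() paths carry additional
pfn_valid()/page_mapping() guards not shown.

/*
 * Sketch only -- not from the patches above.  The copy/clear helpers defer
 * the writeback by setting PG_dcache_dirty; whoever later maps the page
 * tests-and-clears the bit and purges the kernel alias, but only when the
 * kernel address can actually alias the user mapping.
 */
static void flush_dcache_if_deferred(struct page *page, unsigned long address)
{
	if (test_and_clear_bit(PG_dcache_dirty, &page->flags)) {
		unsigned long kaddr = (unsigned long)page_address(page);

		if (pages_do_alias(kaddr, address & PAGE_MASK))
			__flush_wback_region((void *)kaddr, PAGE_SIZE);
	}
}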