| author | Paul Mundt <lethal@linux-sh.org> | 2009-08-21 18:21:07 +0900 |
|---|---|---|
| committer | Paul Mundt <lethal@linux-sh.org> | 2009-08-21 18:21:07 +0900 |
| commit | 64a6d72213dd810dd55bd0a503c36150af41c3c3 (patch) | |
| tree | 81f2f6e66d3a38f5cb7a27f0a85b365b25469fe4 /arch/sh/mm/cache-sh5.c | |
| parent | f26b2a562b46ab186c8383993ab1332673ac4a47 (diff) | |
sh: Kill off now redundant local irq disabling.
on_each_cpu() takes care of IRQ and preempt handling, so the localized
handling in each of the called functions can be killed off.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
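
For context, on_each_cpu() in kernels of this era looked roughly like the sketch below (paraphrased from kernel/smp.c; not part of this patch). The callee runs with preemption disabled on every CPU and with interrupts disabled for the local invocation, which is what makes the per-function local_irq_*() calls removed in this patch redundant.

```c
/*
 * Rough paraphrase of on_each_cpu() from kernel/smp.c of this era,
 * shown for context only.  Remote CPUs run func from IPI context with
 * IRQs off; the local call is likewise wrapped in IRQ disable/enable.
 */
int on_each_cpu(void (*func)(void *info), void *info, int wait)
{
	int ret = 0;

	preempt_disable();
	ret = smp_call_function(func, info, wait);
	local_irq_disable();
	func(info);
	local_irq_enable();
	preempt_enable();

	return ret;
}
```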
Diffstat (limited to 'arch/sh/mm/cache-sh5.c')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | arch/sh/mm/cache-sh5.c | 29 |

1 file changed, 6 insertions(+), 23 deletions(-)
diff --git a/arch/sh/mm/cache-sh5.c b/arch/sh/mm/cache-sh5.c
index 467ff8e260f..2f9dd6df00a 100644
--- a/arch/sh/mm/cache-sh5.c
+++ b/arch/sh/mm/cache-sh5.c
@@ -34,28 +34,22 @@ static inline void sh64_setup_dtlb_cache_slot(unsigned long eaddr,
 					      unsigned long asid,
 					      unsigned long paddr)
 {
-	local_irq_disable();
 	sh64_setup_tlb_slot(dtlb_cache_slot, eaddr, asid, paddr);
 }
 
 static inline void sh64_teardown_dtlb_cache_slot(void)
 {
 	sh64_teardown_tlb_slot(dtlb_cache_slot);
-	local_irq_enable();
 }
 
 static inline void sh64_icache_inv_all(void)
 {
 	unsigned long long addr, flag, data;
-	unsigned long flags;
 
 	addr = ICCR0;
 	flag = ICCR0_ICI;
 	data = 0;
 
-	/* Make this a critical section for safety (probably not strictly necessary.) */
-	local_irq_save(flags);
-
 	/* Without %1 it gets unexplicably wrong */
 	__asm__ __volatile__ (
 		"getcfg	%3, 0, %0\n\t"
@@ -64,8 +58,6 @@ static inline void sh64_icache_inv_all(void)
 		"synci"
 		: "=&r" (data)
 		: "0" (data), "r" (flag), "r" (addr));
-
-	local_irq_restore(flags);
 }
 
 static void sh64_icache_inv_kernel_range(unsigned long start, unsigned long end)
@@ -90,7 +82,6 @@ static void sh64_icache_inv_user_page(struct vm_area_struct *vma, unsigned long
 	   Also, eaddr is page-aligned. */
 	unsigned int cpu = smp_processor_id();
 	unsigned long long addr, end_addr;
-	unsigned long flags = 0;
 	unsigned long running_asid, vma_asid;
 	addr = eaddr;
 	end_addr = addr + PAGE_SIZE;
@@ -111,10 +102,9 @@ static void sh64_icache_inv_user_page(struct vm_area_struct *vma, unsigned long
 
 	running_asid = get_asid();
 	vma_asid = cpu_asid(cpu, vma->vm_mm);
-	if (running_asid != vma_asid) {
-		local_irq_save(flags);
+	if (running_asid != vma_asid)
 		switch_and_save_asid(vma_asid);
-	}
+
 	while (addr < end_addr) {
 		/* Worth unrolling a little */
 		__asm__ __volatile__("icbi %0, 0" : : "r" (addr));
@@ -123,10 +113,9 @@ static void sh64_icache_inv_user_page(struct vm_area_struct *vma, unsigned long
 		__asm__ __volatile__("icbi %0, 96" : : "r" (addr));
 		addr += 128;
 	}
-	if (running_asid != vma_asid) {
+
+	if (running_asid != vma_asid)
 		switch_and_save_asid(running_asid);
-		local_irq_restore(flags);
-	}
 }
 
 static void sh64_icache_inv_user_page_range(struct mm_struct *mm,
@@ -159,16 +148,12 @@ static void sh64_icache_inv_user_page_range(struct mm_struct *mm,
 		unsigned long eaddr;
 		unsigned long after_last_page_start;
 		unsigned long mm_asid, current_asid;
-		unsigned long flags = 0;
 
 		mm_asid = cpu_asid(smp_processor_id(), mm);
 		current_asid = get_asid();
 
-		if (mm_asid != current_asid) {
-			/* Switch ASID and run the invalidate loop under cli */
-			local_irq_save(flags);
+		if (mm_asid != current_asid)
 			switch_and_save_asid(mm_asid);
-		}
 
 		aligned_start = start & PAGE_MASK;
 		after_last_page_start = PAGE_SIZE + ((end - 1) & PAGE_MASK);
@@ -194,10 +179,8 @@ static void sh64_icache_inv_user_page_range(struct mm_struct *mm,
 
 			aligned_start = vma->vm_end; /* Skip to start of next region */
 		}
 
-		if (mm_asid != current_asid) {
+		if (mm_asid != current_asid)
 			switch_and_save_asid(current_asid);
-			local_irq_restore(flags);
-		}
 	}
 }
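
Seen from the caller's side, the resulting pattern is a minimal wrapper like the sketch below. The wrapper names are hypothetical; the actual SMP fan-out for these cache functions was wired up in arch/sh/mm/cache.c by the parent commit.

```c
/*
 * Hypothetical caller-side sketch (illustrative names only): fanning a
 * cache invalidate out to all CPUs via on_each_cpu().  Each invocation
 * already runs with IRQs off, so the callee needs no masking of its own.
 */
static void inv_all_helper(void *unused)
{
	sh64_icache_inv_all();
}

void flush_icache_all(void)
{
	/* wait = 1: return only once every CPU has finished invalidating. */
	on_each_cpu(inv_all_helper, NULL, 1);
}
```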