path: root/arch/sh/mm/cache-sh4.c
author	Linus Torvalds <torvalds@g5.osdl.org>	2006-09-27 08:49:07 -0700
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-09-27 08:49:07 -0700
commit	b98adfccdf5f8dd34ae56a2d5adbe2c030bd4674 (patch)
tree	1807a029520f550dd4f90c95ad0063bceb00d645 /arch/sh/mm/cache-sh4.c
parent	ba21fe71725f94792330ebc3034ef2b35a36276f (diff)
parent	33573c0e3243aaa38b6ad96942de85a1b713c2ff (diff)
Merge master.kernel.org:/pub/scm/linux/kernel/git/lethal/sh-2.6
* master.kernel.org:/pub/scm/linux/kernel/git/lethal/sh-2.6: (108 commits)
  sh: Fix occasional flush_cache_4096() stack corruption.
  sh: Calculate shm alignment at runtime.
  sh: dma-mapping compile fixes.
  sh: Initial vsyscall page support.
  sh: Clean up PAGE_SIZE definition for assembly use.
  sh: Selective flush_cache_mm() flushing.
  sh: More intelligent entry_mask/way_size calculation.
  sh: Support for L2 cache on newer SH-4A CPUs.
  sh: Update kexec support for API changes.
  sh: Optimized readsl()/writesl() support.
  sh: Report movli.l/movco.l capabilities.
  sh: CPU flags in AT_HWCAP in ELF auxvt.
  sh: Add support for 4K stacks.
  sh: Enable /proc/kcore support.
  sh: stack debugging support.
  sh: select CONFIG_EMBEDDED.
  sh: machvec rework.
  sh: Solution Engine SH7343 board support.
  sh: SH7710VoIPGW board support.
  sh: Enable verbose BUG() support.
  ...
Diffstat (limited to 'arch/sh/mm/cache-sh4.c')
-rw-r--r--	arch/sh/mm/cache-sh4.c	685
1 files changed, 548 insertions, 137 deletions
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index 524cea5b47f..e48cc22724d 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -2,49 +2,120 @@
* arch/sh/mm/cache-sh4.c
*
* Copyright (C) 1999, 2000, 2002 Niibe Yutaka
- * Copyright (C) 2001, 2002, 2003, 2004 Paul Mundt
+ * Copyright (C) 2001 - 2006 Paul Mundt
* Copyright (C) 2003 Richard Curnow
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
-
#include <linux/init.h>
-#include <linux/mman.h>
#include <linux/mm.h>
-#include <linux/threads.h>
#include <asm/addrspace.h>
-#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/io.h>
-#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
-extern void __flush_cache_4096_all(unsigned long start);
-static void __flush_cache_4096_all_ex(unsigned long start);
-extern void __flush_dcache_all(void);
-static void __flush_dcache_all_ex(void);
+/*
+ * The maximum number of pages we support up to when doing ranged dcache
+ * flushing. Anything exceeding this will simply flush the dcache in its
+ * entirety.
+ */
+#define MAX_DCACHE_PAGES 64 /* XXX: Tune for ways */
+
+static void __flush_dcache_segment_1way(unsigned long start,
+ unsigned long extent);
+static void __flush_dcache_segment_2way(unsigned long start,
+ unsigned long extent);
+static void __flush_dcache_segment_4way(unsigned long start,
+ unsigned long extent);
+
+static void __flush_cache_4096(unsigned long addr, unsigned long phys,
+ unsigned long exec_offset);
+
+/*
+ * This is initialised here to ensure that it is not placed in the BSS. If
+ * that were to happen, note that cache_init gets called before the BSS is
+ * cleared, so this would get nulled out which would be hopeless.
+ */
+static void (*__flush_dcache_segment_fn)(unsigned long, unsigned long) =
+ (void (*)(unsigned long, unsigned long))0xdeadbeef;
+
+static void compute_alias(struct cache_info *c)
+{
+ c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);
+ c->n_aliases = (c->alias_mask >> PAGE_SHIFT) + 1;
+}
+
+static void __init emit_cache_params(void)
+{
+ printk("PVR=%08x CVR=%08x PRR=%08x\n",
+ ctrl_inl(CCN_PVR),
+ ctrl_inl(CCN_CVR),
+ ctrl_inl(CCN_PRR));
+ printk("I-cache : n_ways=%d n_sets=%d way_incr=%d\n",
+ cpu_data->icache.ways,
+ cpu_data->icache.sets,
+ cpu_data->icache.way_incr);
+ printk("I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
+ cpu_data->icache.entry_mask,
+ cpu_data->icache.alias_mask,
+ cpu_data->icache.n_aliases);
+ printk("D-cache : n_ways=%d n_sets=%d way_incr=%d\n",
+ cpu_data->dcache.ways,
+ cpu_data->dcache.sets,
+ cpu_data->dcache.way_incr);
+ printk("D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
+ cpu_data->dcache.entry_mask,
+ cpu_data->dcache.alias_mask,
+ cpu_data->dcache.n_aliases);
+
+ if (!__flush_dcache_segment_fn)
+ panic("unknown number of cache ways\n");
+}
/*
* SH-4 has virtually indexed and physically tagged cache.
*/
-struct semaphore p3map_sem[4];
+/* Worst case assumed to be 64k cache, direct-mapped i.e. 4 synonym bits. */
+#define MAX_P3_SEMAPHORES 16
+
+struct semaphore p3map_sem[MAX_P3_SEMAPHORES];
void __init p3_cache_init(void)
{
- if (remap_area_pages(P3SEG, 0, PAGE_SIZE*4, _PAGE_CACHABLE))
+ int i;
+
+ compute_alias(&cpu_data->icache);
+ compute_alias(&cpu_data->dcache);
+
+ switch (cpu_data->dcache.ways) {
+ case 1:
+ __flush_dcache_segment_fn = __flush_dcache_segment_1way;
+ break;
+ case 2:
+ __flush_dcache_segment_fn = __flush_dcache_segment_2way;
+ break;
+ case 4:
+ __flush_dcache_segment_fn = __flush_dcache_segment_4way;
+ break;
+ default:
+ __flush_dcache_segment_fn = NULL;
+ break;
+ }
+
+ emit_cache_params();
+
+ if (remap_area_pages(P3SEG, 0, PAGE_SIZE * 4, _PAGE_CACHABLE))
panic("%s failed.", __FUNCTION__);
- sema_init (&p3map_sem[0], 1);
- sema_init (&p3map_sem[1], 1);
- sema_init (&p3map_sem[2], 1);
- sema_init (&p3map_sem[3], 1);
+ for (i = 0; i < cpu_data->dcache.n_aliases; i++)
+ sema_init(&p3map_sem[i], 1);
}
/*
@@ -89,7 +160,6 @@ void __flush_purge_region(void *start, int size)
}
}
-
/*
* No write back please
*/
@@ -108,40 +178,6 @@ void __flush_invalidate_region(void *start, int size)
}
}
-static void __flush_dcache_all_ex(void)
-{
- unsigned long addr, end_addr, entry_offset;
-
- end_addr = CACHE_OC_ADDRESS_ARRAY + (cpu_data->dcache.sets << cpu_data->dcache.entry_shift) * cpu_data->dcache.ways;
- entry_offset = 1 << cpu_data->dcache.entry_shift;
- for (addr = CACHE_OC_ADDRESS_ARRAY; addr < end_addr; addr += entry_offset) {
- ctrl_outl(0, addr);
- }
-}
-
-static void __flush_cache_4096_all_ex(unsigned long start)
-{
- unsigned long addr, entry_offset;
- int i;
-
- entry_offset = 1 << cpu_data->dcache.entry_shift;
- for (i = 0; i < cpu_data->dcache.ways; i++, start += cpu_data->dcache.way_incr) {
- for (addr = CACHE_OC_ADDRESS_ARRAY + start;
- addr < CACHE_OC_ADDRESS_ARRAY + 4096 + start;
- addr += entry_offset) {
- ctrl_outl(0, addr);
- }
- }
-}
-
-void flush_cache_4096_all(unsigned long start)
-{
- if (cpu_data->dcache.ways == 1)
- __flush_cache_4096_all(start);
- else
- __flush_cache_4096_all_ex(start);
-}
-
/*
* Write back the range of D-cache, and purge the I-cache.
*
@@ -153,14 +189,14 @@ void flush_icache_range(unsigned long start, unsigned long end)
}
/*
- * Write back the D-cache and purge the I-cache for signal trampoline.
+ * Write back the D-cache and purge the I-cache for signal trampoline.
* .. which happens to be the same behavior as flush_icache_range().
* So, we simply flush out a line.
*/
void flush_cache_sigtramp(unsigned long addr)
{
unsigned long v, index;
- unsigned long flags;
+ unsigned long flags;
int i;
v = addr & ~(L1_CACHE_BYTES-1);
@@ -172,30 +208,33 @@ void flush_cache_sigtramp(unsigned long addr)
local_irq_save(flags);
jump_to_P2();
- for(i = 0; i < cpu_data->icache.ways; i++, index += cpu_data->icache.way_incr)
+
+ for (i = 0; i < cpu_data->icache.ways;
+ i++, index += cpu_data->icache.way_incr)
ctrl_outl(0, index); /* Clear out Valid-bit */
+
back_to_P1();
+ wmb();
local_irq_restore(flags);
}
static inline void flush_cache_4096(unsigned long start,
unsigned long phys)
{
- unsigned long flags;
- extern void __flush_cache_4096(unsigned long addr, unsigned long phys, unsigned long exec_offset);
+ unsigned long flags, exec_offset = 0;
/*
- * SH7751, SH7751R, and ST40 have no restriction to handle cache.
- * (While SH7750 must do that at P2 area.)
+ * All types of SH-4 require PC to be in P2 to operate on the I-cache.
+ * Some types of SH-4 require PC to be in P2 to operate on the D-cache.
*/
- if ((cpu_data->flags & CPU_HAS_P2_FLUSH_BUG)
- || start < CACHE_OC_ADDRESS_ARRAY) {
- local_irq_save(flags);
- __flush_cache_4096(start | SH_CACHE_ASSOC, P1SEGADDR(phys), 0x20000000);
- local_irq_restore(flags);
- } else {
- __flush_cache_4096(start | SH_CACHE_ASSOC, P1SEGADDR(phys), 0);
- }
+ if ((cpu_data->flags & CPU_HAS_P2_FLUSH_BUG) ||
+ (start < CACHE_OC_ADDRESS_ARRAY))
+ exec_offset = 0x20000000;
+
+ local_irq_save(flags);
+ __flush_cache_4096(start | SH_CACHE_ASSOC,
+ P1SEGADDR(phys), exec_offset);
+ local_irq_restore(flags);
}
/*
@@ -206,15 +245,19 @@ void flush_dcache_page(struct page *page)
{
if (test_bit(PG_mapped, &page->flags)) {
unsigned long phys = PHYSADDR(page_address(page));
+ unsigned long addr = CACHE_OC_ADDRESS_ARRAY;
+ int i, n;
/* Loop all the D-cache */
- flush_cache_4096(CACHE_OC_ADDRESS_ARRAY, phys);
- flush_cache_4096(CACHE_OC_ADDRESS_ARRAY | 0x1000, phys);
- flush_cache_4096(CACHE_OC_ADDRESS_ARRAY | 0x2000, phys);
- flush_cache_4096(CACHE_OC_ADDRESS_ARRAY | 0x3000, phys);
+ n = cpu_data->dcache.n_aliases;
+ for (i = 0; i < n; i++, addr += PAGE_SIZE)
+ flush_cache_4096(addr, phys);
}
+
+ wmb();
}
+/* TODO: Selective icache invalidation through IC address array.. */
static inline void flush_icache_all(void)
{
unsigned long flags, ccr;
@@ -227,34 +270,142 @@ static inline void flush_icache_all(void)
ccr |= CCR_CACHE_ICI;
ctrl_outl(ccr, CCR);
+ /*
+ * back_to_P1() will take care of the barrier for us, don't add
+ * another one!
+ */
+
back_to_P1();
local_irq_restore(flags);
}
+void flush_dcache_all(void)
+{
+ (*__flush_dcache_segment_fn)(0UL, cpu_data->dcache.way_size);
+ wmb();
+}
+
void flush_cache_all(void)
{
- if (cpu_data->dcache.ways == 1)
- __flush_dcache_all();
- else
- __flush_dcache_all_ex();
+ flush_dcache_all();
flush_icache_all();
}
+static void __flush_cache_mm(struct mm_struct *mm, unsigned long start,
+ unsigned long end)
+{
+ unsigned long d = 0, p = start & PAGE_MASK;
+ unsigned long alias_mask = cpu_data->dcache.alias_mask;
+ unsigned long n_aliases = cpu_data->dcache.n_aliases;
+ unsigned long select_bit;
+ unsigned long all_aliases_mask;
+ unsigned long addr_offset;
+ pgd_t *dir;
+ pmd_t *pmd;
+ pud_t *pud;
+ pte_t *pte;
+ int i;
+
+ dir = pgd_offset(mm, p);
+ pud = pud_offset(dir, p);
+ pmd = pmd_offset(pud, p);
+ end = PAGE_ALIGN(end);
+
+ all_aliases_mask = (1 << n_aliases) - 1;
+
+ do {
+ if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) {
+ p &= PMD_MASK;
+ p += PMD_SIZE;
+ pmd++;
+
+ continue;
+ }
+
+ pte = pte_offset_kernel(pmd, p);
+
+ do {
+ unsigned long phys;
+ pte_t entry = *pte;
+
+ if (!(pte_val(entry) & _PAGE_PRESENT)) {
+ pte++;
+ p += PAGE_SIZE;
+ continue;
+ }
+
+ phys = pte_val(entry) & PTE_PHYS_MASK;
+
+ if ((p ^ phys) & alias_mask) {
+ d |= 1 << ((p & alias_mask) >> PAGE_SHIFT);
+ d |= 1 << ((phys & alias_mask) >> PAGE_SHIFT);
+
+ if (d == all_aliases_mask)
+ goto loop_exit;
+ }
+
+ pte++;
+ p += PAGE_SIZE;
+ } while (p < end && ((unsigned long)pte & ~PAGE_MASK));
+ pmd++;
+ } while (p < end);
+
+loop_exit:
+ addr_offset = 0;
+ select_bit = 1;
+
+ for (i = 0; i < n_aliases; i++) {
+ if (d & select_bit) {
+ (*__flush_dcache_segment_fn)(addr_offset, PAGE_SIZE);
+ wmb();
+ }
+
+ select_bit <<= 1;
+ addr_offset += PAGE_SIZE;
+ }
+}
+
+/*
+ * Note : (RPC) since the caches are physically tagged, the only point
+ * of flush_cache_mm for SH-4 is to get rid of aliases from the
+ * D-cache. The assumption elsewhere, e.g. flush_cache_range, is that
+ * lines can stay resident so long as the virtual address they were
+ * accessed with (hence cache set) is in accord with the physical
+ * address (i.e. tag). It's no different here. So I reckon we don't
+ * need to flush the I-cache, since aliases don't matter for that. We
+ * should try that.
+ *
+ * Caller takes mm->mmap_sem.
+ */
void flush_cache_mm(struct mm_struct *mm)
{
- /* Is there any good way? */
- /* XXX: possibly call flush_cache_range for each vm area */
- /*
- * FIXME: Really, the optimal solution here would be able to flush out
- * individual lines created by the specified context, but this isn't
- * feasible for a number of architectures (such as MIPS, and some
- * SPARC) .. is this possible for SuperH?
- *
- * In the meantime, we'll just flush all of the caches.. this
- * seems to be the simplest way to avoid at least a few wasted
- * cache flushes. -Lethal
+ /*
+ * If cache is only 4k-per-way, there are never any 'aliases'. Since
+ * the cache is physically tagged, the data can just be left in there.
*/
- flush_cache_all();
+ if (cpu_data->dcache.n_aliases == 0)
+ return;
+
+ /*
+ * Don't bother groveling around the dcache for the VMA ranges
+ * if there are too many PTEs to make it worthwhile.
+ */
+ if (mm->nr_ptes >= MAX_DCACHE_PAGES)
+ flush_dcache_all();
+ else {
+ struct vm_area_struct *vma;
+
+ /*
+ * In this case there are reasonably sized ranges to flush,
+ * iterate through the VMA list and take care of any aliases.
+ */
+ for (vma = mm->mmap; vma; vma = vma->vm_next)
+ __flush_cache_mm(mm, vma->vm_start, vma->vm_end);
+ }
+
+ /* Only touch the icache if one of the VMAs has VM_EXEC set. */
+ if (mm->exec_vm)
+ flush_icache_all();
}
/*
@@ -263,27 +414,40 @@ void flush_cache_mm(struct mm_struct *mm)
* ADDR: Virtual Address (U0 address)
* PFN: Physical page number
*/
-void flush_cache_page(struct vm_area_struct *vma, unsigned long address, unsigned long pfn)
+void flush_cache_page(struct vm_area_struct *vma, unsigned long address,
+ unsigned long pfn)
{
unsigned long phys = pfn << PAGE_SHIFT;
+ unsigned int alias_mask;
+
+ alias_mask = cpu_data->dcache.alias_mask;
/* We only need to flush D-cache when we have alias */
- if ((address^phys) & CACHE_ALIAS) {
+ if ((address^phys) & alias_mask) {
/* Loop 4K of the D-cache */
flush_cache_4096(
- CACHE_OC_ADDRESS_ARRAY | (address & CACHE_ALIAS),
+ CACHE_OC_ADDRESS_ARRAY | (address & alias_mask),
phys);
/* Loop another 4K of the D-cache */
flush_cache_4096(
- CACHE_OC_ADDRESS_ARRAY | (phys & CACHE_ALIAS),
+ CACHE_OC_ADDRESS_ARRAY | (phys & alias_mask),
phys);
}
- if (vma->vm_flags & VM_EXEC)
- /* Loop 4K (half) of the I-cache */
+ alias_mask = cpu_data->icache.alias_mask;
+ if (vma->vm_flags & VM_EXEC) {
+ /*
+ * Evict entries from the portion of the cache from which code
+ * may have been executed at this address (virtual). There's
+ * no need to evict from the portion corresponding to the
+ * physical address as for the D-cache, because we know the
+ * kernel has never executed the code through its identity
+ * translation.
+ */
flush_cache_4096(
- CACHE_IC_ADDRESS_ARRAY | (address & 0x1000),
+ CACHE_IC_ADDRESS_ARRAY | (address & alias_mask),
phys);
+ }
}
/*
@@ -298,52 +462,31 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long address, unsigne
void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end)
{
- unsigned long p = start & PAGE_MASK;
- pgd_t *dir;
- pmd_t *pmd;
- pte_t *pte;
- pte_t entry;
- unsigned long phys;
- unsigned long d = 0;
-
- dir = pgd_offset(vma->vm_mm, p);
- pmd = pmd_offset(dir, p);
+ /*
+ * If cache is only 4k-per-way, there are never any 'aliases'. Since
+ * the cache is physically tagged, the data can just be left in there.
+ */
+ if (cpu_data->dcache.n_aliases == 0)
+ return;
- do {
- if (pmd_none(*pmd) || pmd_bad(*pmd)) {
- p &= ~((1 << PMD_SHIFT) -1);
- p += (1 << PMD_SHIFT);
- pmd++;
- continue;
- }
- pte = pte_offset_kernel(pmd, p);
- do {
- entry = *pte;
- if ((pte_val(entry) & _PAGE_PRESENT)) {
- phys = pte_val(entry)&PTE_PHYS_MASK;
- if ((p^phys) & CACHE_ALIAS) {
- d |= 1 << ((p & CACHE_ALIAS)>>12);
- d |= 1 << ((phys & CACHE_ALIAS)>>12);
- if (d == 0x0f)
- goto loop_exit;
- }
- }
- pte++;
- p += PAGE_SIZE;
- } while (p < end && ((unsigned long)pte & ~PAGE_MASK));
- pmd++;
- } while (p < end);
- loop_exit:
- if (d & 1)
- flush_cache_4096_all(0);
- if (d & 2)
- flush_cache_4096_all(0x1000);
- if (d & 4)
- flush_cache_4096_all(0x2000);
- if (d & 8)
- flush_cache_4096_all(0x3000);
- if (vma->vm_flags & VM_EXEC)
+ /*
+ * Don't bother with the lookup and alias check if we have a
+ * wide range to cover, just blow away the dcache in its
+ * entirety instead. -- PFM.
+ */
+ if (((end - start) >> PAGE_SHIFT) >= MAX_DCACHE_PAGES)
+ flush_dcache_all();
+ else
+ __flush_cache_mm(vma->vm_mm, start, end);
+
+ if (vma->vm_flags & VM_EXEC) {
+ /*
+ * TODO: Is this required??? Need to look at how I-cache
+ * coherency is assured when new programs are loaded to see if
+ * this matters.
+ */
flush_icache_all();
+ }
}
/*
@@ -357,5 +500,273 @@ void flush_icache_user_range(struct vm_area_struct *vma,
struct page *page, unsigned long addr, int len)
{
flush_cache_page(vma, addr, page_to_pfn(page));
+ mb();
+}
+
+/**
+ * __flush_cache_4096
+ *
+ * @addr: address in memory mapped cache array
+ * @phys: P1 address to flush (has to match tags if addr has 'A' bit
+ * set i.e. associative write)
+ * @exec_offset: set to 0x20000000 if flush has to be executed from P2
+ * region else 0x0
+ *
+ * The offset into the cache array implied by 'addr' selects the
+ * 'colour' of the virtual address range that will be flushed. The
+ * operation (purge/write-back) is selected by the lower 2 bits of
+ * 'phys'.
+ */
+static void __flush_cache_4096(unsigned long addr, unsigned long phys,
+ unsigned long exec_offset)
+{
+ int way_count;
+ unsigned long base_addr = addr;
+ struct cache_info *dcache;
+ unsigned long way_incr;
+ unsigned long a, ea, p;
+ unsigned long temp_pc;
+
+ dcache = &cpu_data->dcache;
+ /* Write this way for better assembly. */
+ way_count = dcache->ways;
+ way_incr = dcache->way_incr;
+
+ /*
+ * Apply exec_offset (i.e. branch to P2 if required.).
+ *
+ * FIXME:
+ *
+ * If I write "=r" for the (temp_pc), it puts this in r6 hence
+ * trashing exec_offset before it's been added on - why? Hence
+ * "=&r" as a 'workaround'
+ */
+ asm volatile("mov.l 1f, %0\n\t"
+ "add %1, %0\n\t"
+ "jmp @%0\n\t"
+ "nop\n\t"
+ ".balign 4\n\t"
+ "1: .long 2f\n\t"
+ "2:\n" : "=&r" (temp_pc) : "r" (exec_offset));
+
+ /*
+ * We know there will be >=1 iteration, so write as do-while to avoid
+ * pointless head-of-loop check for 0 iterations.
+ */
+ do {
+ ea = base_addr + PAGE_SIZE;
+ a = base_addr;
+ p = phys;
+
+ do {
+ *(volatile unsigned long *)a = p;
+ /*
+ * Next line: intentionally not p+32, saves an add, p
+ * will do since only the cache tag bits need to
+ * match.
+ */
+ *(volatile unsigned long *)(a+32) = p;
+ a += 64;
+ p += 64;
+ } while (a < ea);
+
+ base_addr += way_incr;
+ } while (--way_count != 0);
}
+/*
+ * Break the 1, 2 and 4 way variants of this out into separate functions to
+ * avoid nearly all the overhead of having the conditional stuff in the function
+ * bodies (+ the 1 and 2 way cases avoid saving any registers too).
+ */
+static void __flush_dcache_segment_1way(unsigned long start,
+ unsigned long extent_per_way)
+{
+ unsigned long orig_sr, sr_with_bl;
+ unsigned long base_addr;
+ unsigned long way_incr, linesz, way_size;
+ struct cache_info *dcache;
+ register unsigned long a0, a0e;
+
+ asm volatile("stc sr, %0" : "=r" (orig_sr));
+ sr_with_bl = orig_sr | (1<<28);
+ base_addr = ((unsigned long)&empty_zero_page[0]);
+
+ /*
+ * The previous code aligned base_addr to 16k, i.e. the way_size of all
+ * existing SH-4 D-caches. Whilst I don't see a need to have this
+ * aligned to any better than the cache line size (which it will be
+ * anyway by construction), let's align it to at least the way_size of
+ * any existing or conceivable SH-4 D-cache. -- RPC
+ */
+ base_addr = ((base_addr >> 16) << 16);
+ base_addr |= start;
+
+ dcache = &cpu_data->dcache;
+ linesz = dcache->linesz;
+ way_incr = dcache->way_incr;
+ way_size = dcache->way_size;
+
+ a0 = base_addr;
+ a0e = base_addr + extent_per_way;
+ do {
+ asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
+ asm volatile("movca.l r0, @%0\n\t"
+ "ocbi @%0" : : "r" (a0));
+ a0 += linesz;
+ asm volatile("movca.l r0, @%0\n\t"
+ "ocbi @%0" : : "r" (a0));
+ a0 += linesz;
+ asm volatile("movca.l r0, @%0\n\t"
+ "ocbi @%0" : : "r" (a0));
+ a0 += linesz;
+ asm volatile("movca.l r0, @%0\n\t"
+ "ocbi @%0" : : "r" (a0));
+ asm volatile("ldc %0, sr" : : "r" (orig_sr));
+ a0 += linesz;
+ } while (a0 < a0e);
+}
+
+static void __flush_dcache_segment_2way(unsigned long start,
+ unsigned long extent_per_way)
+{
+ unsigned long orig_sr, sr_with_bl;
+ unsigned long base_addr;
+ unsigned long way_incr, linesz, way_size;
+ struct cache_info *dcache;
+ register unsigned long a0, a1, a0e;
+
+ asm volatile("stc sr, %0" : "=r" (orig_sr));
+ sr_with_bl = orig_sr | (1<<28);
+ base_addr = ((unsigned long)&empty_zero_page[0]);
+
+ /* See comment under 1-way above */
+ base_addr = ((base_addr >> 16) << 16);
+ base_addr |= start;
+
+ dcache = &cpu_data->dcache;
+ linesz = dcache->linesz;
+ way_incr = dcache->way_incr;
+ way_size = dcache->way_size;
+
+ a0 = base_addr;
+ a1 = a0 + way_incr;
+ a0e = base_addr + extent_per_way;
+ do {
+ asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
+ asm volatile("movca.l r0, @%0\n\t"
+ "movca.l r0, @%1\n\t"
+ "ocbi @%0\n\t"
+ "ocbi @%1" : :
+ "r" (a0), "r" (a1));
+ a0 += linesz;
+ a1 += linesz;
+ asm volatile("movca.l r0, @%0\n\t"
+ "movca.l r0, @%1\n\t"
+ "ocbi @%0\n\t"
+ "ocbi @%1" : :
+ "r" (a0), "r" (a1));
+ a0 += linesz;
+ a1 += linesz;
+ asm volatile("movca.l r0, @%0\n\t"
+ "movca.l r0, @%1\n\t"
+ "ocbi @%0\n\t"
+ "ocbi @%1" : :
+ "r" (a0), "r" (a1));
+ a0 += linesz;
+ a1 += linesz;
+ asm volatile("movca.l r0, @%0\n\t"
+ "movca.l r0, @%1\n\t"
+ "ocbi @%0\n\t"
+ "ocbi @%1" : :
+ "r" (a0), "r" (a1));
+ asm volatile("ldc %0, sr" : : "r" (orig_sr));
+ a0 += linesz;
+ a1 += linesz;
+ } while (a0 < a0e);
+}
+
+static void __flush_dcache_segment_4way(unsigned long start,
+ unsigned long extent_per_way)
+{
+ unsigned long orig_sr, sr_with_bl;
+ unsigned long base_addr;
+ unsigned long way_incr, linesz, way_size;
+ struct cache_info *dcache;
+ register unsigned long a0, a1, a2, a3, a0e;
+
+ asm volatile("stc sr, %0" : "=r" (orig_sr));
+ sr_with_bl = orig_sr | (1<<28);
+ base_addr = ((unsigned long)&empty_zero_page[0]);
+
+ /* See comment under 1-way above */
+ base_addr = ((base_addr >> 16) << 16);
+ base_addr |= start;
+
+ dcache = &cpu_data->dcache;
+ linesz = dcache->linesz;
+ way_incr = dcache->way_incr;
+ way_size = dcache->way_size;
+
+ a0 = base_addr;
+ a1 = a0 + way_incr;
+ a2 = a1 + way_incr;
+ a3 = a2 + way_incr;
+ a0e = base_addr + extent_per_way;
+ do {
+ asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
+ asm volatile("movca.l r0, @%0\n\t"
+ "movca.l r0, @%1\n\t"
+ "movca.l r0, @%2\n\t"
+ "movca.l r0, @%3\n\t"
+ "ocbi @%0\n\t"
+ "ocbi @%1\n\t"
+ "ocbi @%2\n\t"
+ "ocbi @%3\n\t" : :
+ "r" (a0), "r" (a1), "r" (a2), "r" (a3));
+ a0 += linesz;
+ a1 += linesz;
+ a2 += linesz;
+ a3 += linesz;
+ asm volatile("movca.l r0, @%0\n\t"
+ "movca.l r0, @%1\n\t"
+ "movca.l r0, @%2\n\t"
+ "movca.l r0, @%3\n\t"
+ "ocbi @%0\n\t"
+ "ocbi @%1\n\t"
+ "ocbi @%2\n\t"
+ "ocbi @%3\n\t" : :
+ "r" (a0), "r" (a1), "r" (a2), "r" (a3));
+ a0 += linesz;
+ a1 += linesz;
+ a2 += linesz;
+ a3 += linesz;
+ asm volatile("movca.l r0, @%0\n\t"
+ "movca.l r0, @%1\n\t"
+ "movca.l r0, @%2\n\t"
+ "movca.l r0, @%3\n\t"
+ "ocbi @%0\n\t"
+ "ocbi @%1\n\t"
+ "ocbi @%2\n\t"
+ "ocbi @%3\n\t" : :
+ "r" (a0), "r" (a1), "r" (a2), "r" (a3));
+ a0 += linesz;
+ a1 += linesz;
+ a2 += linesz;
+ a3 += linesz;
+ asm volatile("movca.l r0, @%0\n\t"
+ "movca.l r0, @%1\n\t"
+ "movca.l r0, @%2\n\t"
+ "movca.l r0, @%3\n\t"
+ "ocbi @%0\n\t"
+ "ocbi @%1\n\t"
+ "ocbi @%2\n\t"
+ "ocbi @%3\n\t" : :
+ "r" (a0), "r" (a1), "r" (a2), "r" (a3));
+ asm volatile("ldc %0, sr" : : "r" (orig_sr));
+ a0 += linesz;
+ a1 += linesz;
+ a2 += linesz;
+ a3 += linesz;
+ } while (a0 < a0e);
+}
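
The compute_alias() helper introduced by this patch derives the D-cache alias (page-colour) count from the cache geometry at runtime, replacing the hard-coded group of four flush_cache_4096() calls. The following minimal userspace sketch is not part of the commit; it only repeats the patch's arithmetic for an assumed SH-4 style geometry (512 sets x 32-byte lines x 1 way = 16KiB, 4KiB pages) to show how alias_mask and n_aliases come out.

/*
 * Standalone sketch (assumed example geometry, not kernel code):
 * the compute_alias() arithmetic from the patch above.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

struct cache_info {
	unsigned int sets;
	unsigned int entry_shift;	/* log2(cache line size) */
	unsigned long alias_mask;
	unsigned int n_aliases;
};

static void compute_alias(struct cache_info *c)
{
	/* Same expressions as in the patch above. */
	c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);
	c->n_aliases = (c->alias_mask >> PAGE_SHIFT) + 1;
}

int main(void)
{
	/* Assumed geometry: 512 sets, 32-byte lines (entry_shift = 5). */
	struct cache_info dcache = { .sets = 512, .entry_shift = 5 };

	compute_alias(&dcache);

	/*
	 * Prints alias_mask=0x3000 n_aliases=4: the four page colours that
	 * the old code covered with four explicit flush_cache_4096() calls.
	 */
	printf("alias_mask=0x%04lx n_aliases=%u\n",
	       dcache.alias_mask, dcache.n_aliases);
	return 0;
}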