author    | Lennert Buytenhek <buytenh@wantstofly.org>    | 2006-09-16 10:52:02 +0100
committer | Russell King <rmk+kernel@arm.linux.org.uk>    | 2006-09-25 10:25:27 +0100
commit    | 197c9444d6093b70c8faa24e7ab04a2423c9d14d (patch)
tree      | 581916e32828c2454c099fdb443e2a7fefc5172e
parent    | 51635ad282ead58b9d164f07e1fb62a9456b427c (diff)
[ARM] 3814/1: move 80200 dma_inv_range() erratum check out of line
On stepping A0/A1 of the 80200, invalidating D-cache by line doesn't
clear the dirty bits, which means that if we invalidate a dirty line,
the dirty data can still be written back to memory later on.
To work around this, dma_inv_range() on these two steppings is
implemented as dma_flush_range() (i.e. each D-cache line is cleaned
before it is invalidated).  For this, we currently have a processor
ID check in xscale_dma_inv_range(), but a better solution is to add
a separate cache_fns and proc_info entry for the 80200 A0/A1
steppings, which keeps the erratum check out of the fast path.
Signed-off-by: Lennert Buytenhek <buytenh@wantstofly.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
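The change replaces a per-call CPU ID test with a function-pointer table
selected once at boot. Below is a minimal C sketch of that idea only; the
struct and function names (cache_ops, xscale_80200_a0_a1_ops, and so on) are
illustrative placeholders, not the kernel's actual definitions.

#include <stdint.h>

/* Hypothetical table of DMA cache maintenance operations. */
struct cache_ops {
	void (*dma_inv_range)(unsigned long start, unsigned long end);
	void (*dma_clean_range)(unsigned long start, unsigned long end);
	void (*dma_flush_range)(unsigned long start, unsigned long end);
};

/* Stand-in implementations (stubs for the sketch). */
static void xscale_inv(unsigned long s, unsigned long e)   { /* invalidate lines */ }
static void xscale_clean(unsigned long s, unsigned long e) { /* clean lines */ }
static void xscale_flush(unsigned long s, unsigned long e) { /* clean, then invalidate */ }

/* Default table: dma_inv_range really invalidates. */
static const struct cache_ops xscale_ops = {
	.dma_inv_range   = xscale_inv,
	.dma_clean_range = xscale_clean,
	.dma_flush_range = xscale_flush,
};

/*
 * Erratum table for 80200 A0/A1: the dma_inv_range slot points at the
 * flush routine, so dirty lines are cleaned before being invalidated.
 */
static const struct cache_ops xscale_80200_a0_a1_ops = {
	.dma_inv_range   = xscale_flush,
	.dma_clean_range = xscale_clean,
	.dma_flush_range = xscale_flush,
};

/* Chosen once from the CPU ID; callers never re-check the stepping. */
static const struct cache_ops *cpu_cache;

static void select_cache_ops(uint32_t cpu_id)
{
	/* 0x69052000/0x69052001 are the A0/A1 IDs the patch matches on. */
	if ((cpu_id & 0xfffffffe) == 0x69052000)
		cpu_cache = &xscale_80200_a0_a1_ops;
	else
		cpu_cache = &xscale_ops;
}

With the table chosen once, the invalidate path no longer needs the
mrc/eor/bics sequence on every call, which is exactly the per-call cost the
patch removes from xscale_dma_inv_range().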
-rw-r--r-- | arch/arm/mm/proc-xscale.S | 58
1 file changed, 52 insertions(+), 6 deletions(-)
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index 3ca0c92e98a..e8b377d637f 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -311,12 +311,6 @@ ENTRY(xscale_flush_kern_dcache_page)
  *	- end	 - virtual end address
  */
 ENTRY(xscale_dma_inv_range)
-	mrc	p15, 0, r2, c0, c0, 0		@ read ID
-	eor	r2, r2, #0x69000000
-	eor	r2, r2, #0x00052000
-	bics	r2, r2, #1
-	beq	xscale_dma_flush_range
-
 	tst	r0, #CACHELINESIZE - 1
 	bic	r0, r0, #CACHELINESIZE - 1
 	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
@@ -375,6 +369,30 @@ ENTRY(xscale_cache_fns)
 	.long	xscale_dma_clean_range
 	.long	xscale_dma_flush_range
 
+/*
+ * On stepping A0/A1 of the 80200, invalidating D-cache by line doesn't
+ * clear the dirty bits, which means that if we invalidate a dirty line,
+ * the dirty data can still be written back to external memory later on.
+ *
+ * The recommended workaround is to always do a clean D-cache line before
+ * doing an invalidate D-cache line, so on the affected processors,
+ * dma_inv_range() is implemented as dma_flush_range().
+ *
+ * See erratum #25 of "Intel 80200 Processor Specification Update",
+ * revision January 22, 2003, available at:
+ * http://www.intel.com/design/iio/specupdt/273415.htm
+ */
+ENTRY(xscale_80200_A0_A1_cache_fns)
+	.long	xscale_flush_kern_cache_all
+	.long	xscale_flush_user_cache_all
+	.long	xscale_flush_user_cache_range
+	.long	xscale_coherent_kern_range
+	.long	xscale_coherent_user_range
+	.long	xscale_flush_kern_dcache_page
+	.long	xscale_dma_flush_range
+	.long	xscale_dma_clean_range
+	.long	xscale_dma_flush_range
+
 ENTRY(cpu_xscale_dcache_clean_area)
 1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
 	add	r0, r0, #CACHELINESIZE
@@ -531,6 +549,11 @@ cpu_elf_name:
 	.asciz	"v5"
 	.size	cpu_elf_name, . - cpu_elf_name
 
+	.type	cpu_80200_A0_A1_name, #object
+cpu_80200_A0_A1_name:
+	.asciz	"XScale-80200 A0/A1"
+	.size	cpu_80200_A0_A1_name, . - cpu_80200_A0_A1_name
+
 	.type	cpu_80200_name, #object
 cpu_80200_name:
 	.asciz	"XScale-80200"
@@ -595,6 +618,29 @@ cpu_pxa270_name:
 
 	.section ".proc.info.init", #alloc, #execinstr
 
+	.type	__80200_A0_A1_proc_info,#object
+__80200_A0_A1_proc_info:
+	.long	0x69052000
+	.long	0xfffffffe
+	.long	PMD_TYPE_SECT | \
+		PMD_SECT_BUFFERABLE | \
+		PMD_SECT_CACHEABLE | \
+		PMD_SECT_AP_WRITE | \
+		PMD_SECT_AP_READ
+	.long	PMD_TYPE_SECT | \
+		PMD_SECT_AP_WRITE | \
+		PMD_SECT_AP_READ
+	b	__xscale_setup
+	.long	cpu_arch_name
+	.long	cpu_elf_name
+	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
+	.long	cpu_80200_name
+	.long	xscale_processor_functions
+	.long	v4wbi_tlb_fns
+	.long	xscale_mc_user_fns
+	.long	xscale_80200_A0_A1_cache_fns
+	.size	__80200_A0_A1_proc_info, . - __80200_A0_A1_proc_info
+
 	.type	__80200_proc_info,#object
 __80200_proc_info:
 	.long	0x69052000
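For reference, the value/mask pair in the new __80200_A0_A1_proc_info entry
(0x69052000 / 0xfffffffe) matches the same two CPU IDs that the removed
mrc/eor/bics sequence tested for. A hedged C restatement of that match (the
helper name is made up for illustration):

#include <stdint.h>

/* Sketch of the ID match the patch encodes; the name is illustrative. */
static int is_80200_a0_a1(uint32_t id)
{
	/* Old inline check: (id ^ 0x69052000) with bit 0 cleared == 0.  */
	/* New proc_info form: value 0x69052000, mask 0xfffffffe.        */
	/* Both accept exactly 0x69052000 (A0) and 0x69052001 (A1).      */
	return (id & 0xfffffffe) == 0x69052000;
}

Since the boot-time processor lookup takes the first matching entry in this
section, the more specific A0/A1 entry is placed ahead of the generic
__80200_proc_info entry so that it wins on the affected steppings.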