author:    Russell King <rmk@dyn-67.arm.linux.org.uk>   2006-06-29 20:17:15 +0100
committer: Russell King <rmk+kernel@arm.linux.org.uk>   2006-06-29 22:14:30 +0100
commit:    ff0daca525dde796382b9ccd563f169df2571211 (patch)
tree:      acb3feb60cec39f316447f702a2277a7448cbad9 /include
parent:    ba53201180e267bd1f0792e6c375ced7c100738e (diff)
[ARM] Add section support to ioremap
Allow section mappings to be set up using ioremap() and torn down
with iounmap(). This requires additional support in the MM
context switch to ensure that mappings are properly synchronised
when mapped in.
Based on an original implementation by Deepak Saxena, reworked and
ARMv6 support added by rmk.
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
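The synchronisation referred to above is sequence-number based: init_mm.context.kvm_seq is bumped whenever ioremap() changes the kernel page tables, and check_context() copies the kernel portion of the page directory into a task's own tables once it notices the task's kvm_seq is stale. Only the 'include' side of that is shown in the diff below; as a rough, standalone C model of the lazy catch-up (the names master_pgd, master_seq, task_ctx and sync_kernel_mappings are invented for this sketch and are not kernel identifiers):

/*
 * Userspace model of the kvm_seq idea from this commit: a global set of
 * "kernel" page-directory entries plus a sequence number, and per-task
 * copies that are refreshed lazily at "context switch" time.
 */
#include <stdio.h>
#include <string.h>

#define KERNEL_PGD_ENTRIES 64   /* pretend kernel-space slice of a pgd */

static unsigned long master_pgd[KERNEL_PGD_ENTRIES]; /* stands in for init_mm's tables */
static unsigned int  master_seq;                     /* stands in for init_mm.context.kvm_seq */

struct task_ctx {
	unsigned long pgd[KERNEL_PGD_ENTRIES]; /* this task's copy of the kernel entries */
	unsigned int  kvm_seq;                 /* this task's view of the sequence number */
};

/* ioremap() side: install a mapping in the master tables, then bump the seq. */
static void master_add_mapping(int idx, unsigned long entry)
{
	master_pgd[idx] = entry;
	master_seq++;
}

/* check_context() side: copy the kernel entries, retrying if they changed mid-copy. */
static void sync_kernel_mappings(struct task_ctx *ctx)
{
	unsigned int seq;

	do {
		seq = master_seq;
		memcpy(ctx->pgd, master_pgd, sizeof(master_pgd));
		ctx->kvm_seq = seq;
	} while (seq != master_seq);
}

static void check_context(struct task_ctx *ctx)
{
	if (ctx->kvm_seq != master_seq)
		sync_kernel_mappings(ctx);
}

int main(void)
{
	struct task_ctx task = { .kvm_seq = 0 };

	master_add_mapping(3, 0xdeadb000UL); /* ioremap() creates a new section mapping */
	check_context(&task);                /* the next context switch pulls it in */
	printf("task sees entry 3 = %#lx (seq %u)\n", task.pgd[3], task.kvm_seq);
	return 0;
}

The retry loop is the natural way to cope with a mapping being installed while the copy is in flight, and keeping the check in check_context() means a task pays only an integer comparison per context switch until the kernel tables actually change.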
Diffstat (limited to 'include')
-rw-r--r--   include/asm-arm/memory.h       |  5
-rw-r--r--   include/asm-arm/mmu.h          |  1
-rw-r--r--   include/asm-arm/mmu_context.h  | 12
3 files changed, 17 insertions, 1 deletion
diff --git a/include/asm-arm/memory.h b/include/asm-arm/memory.h
index 94f973b704f..176a4fb0498 100644
--- a/include/asm-arm/memory.h
+++ b/include/asm-arm/memory.h
@@ -68,6 +68,11 @@
  */
 #define XIP_VIRT_ADDR(physaddr)  (MODULE_START + ((physaddr) & 0x000fffff))
 
+/*
+ * Allow 2MB-aligned ioremap pages
+ */
+#define IOREMAP_MAX_ORDER	21
+
 #else /* CONFIG_MMU */
 
 /*
diff --git a/include/asm-arm/mmu.h b/include/asm-arm/mmu.h
index 23dde52e094..fe2a23b5627 100644
--- a/include/asm-arm/mmu.h
+++ b/include/asm-arm/mmu.h
@@ -7,6 +7,7 @@ typedef struct {
 #if __LINUX_ARM_ARCH__ >= 6
 	unsigned int id;
 #endif
+	unsigned int kvm_seq;
 } mm_context_t;
 
 #if __LINUX_ARM_ARCH__ >= 6
diff --git a/include/asm-arm/mmu_context.h b/include/asm-arm/mmu_context.h
index 9fadb01e030..d1a65b1edca 100644
--- a/include/asm-arm/mmu_context.h
+++ b/include/asm-arm/mmu_context.h
@@ -17,6 +17,8 @@
 #include <asm/cacheflush.h>
 #include <asm/proc-fns.h>
 
+void __check_kvm_seq(struct mm_struct *mm);
+
 #if __LINUX_ARM_ARCH__ >= 6
 
 /*
@@ -45,13 +47,21 @@ static inline void check_context(struct mm_struct *mm)
 {
 	if (unlikely((mm->context.id ^ cpu_last_asid) >> ASID_BITS))
 		__new_context(mm);
+
+	if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
+		__check_kvm_seq(mm);
 }
 
 #define init_new_context(tsk,mm)	(__init_new_context(tsk,mm),0)
 
 #else
 
-#define check_context(mm)		do { } while (0)
+static inline void check_context(struct mm_struct *mm)
+{
+	if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
+		__check_kvm_seq(mm);
+}
+
 #define init_new_context(tsk,mm)	0
 
 #endif
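The IOREMAP_MAX_ORDER definition added to memory.h above is the arch-side cap on how far the generic vmalloc allocator will round up the alignment of a VM_IOREMAP area; aligning large ioremap() regions this aggressively is what lets the arch code back them with section mappings instead of individual pages. The following standalone sketch approximates that rounding, assuming the fls()-style capping used by the generic __get_vm_area() of this era (the helper names fls_ulong and ioremap_align, and the sample sizes, are made up for the illustration):

/*
 * Model of how an ioremap() request size might be turned into an
 * allocation alignment, capped at IOREMAP_MAX_ORDER.  Illustration only.
 */
#include <stdio.h>

#define PAGE_SHIFT        12
#define IOREMAP_MAX_ORDER 21    /* the ARM value introduced by this patch: 2MB */

/* Highest set bit, 1-based, like the kernel's fls(); returns 0 for x == 0. */
static int fls_ulong(unsigned long x)
{
	return x ? 8 * (int)sizeof(unsigned long) - __builtin_clzl(x) : 0;
}

/* Alignment a VM_IOREMAP allocation of 'size' bytes would be given. */
static unsigned long ioremap_align(unsigned long size)
{
	int bit = fls_ulong(size);

	if (bit > IOREMAP_MAX_ORDER)
		bit = IOREMAP_MAX_ORDER;  /* never align beyond the arch cap */
	else if (bit < PAGE_SHIFT)
		bit = PAGE_SHIFT;         /* but always at least one page */

	return 1UL << bit;
}

int main(void)
{
	unsigned long sizes[] = { 4096UL, 64UL << 10, 1UL << 20, 4UL << 20 };

	for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("ioremap of %8lu bytes -> alignment %8lu\n",
		       sizes[i], ioremap_align(sizes[i]));
	return 0;
}

Without some cap the allocator would over-align very large requests and waste vmalloc address space, so the arch advertises the largest alignment its section-mapping code can actually exploit.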