 arch/arm/mm/flush.c          | 39
 include/asm-arm/cacheflush.h | 10
 2 files changed, 49 insertions(+), 0 deletions(-)
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 628348c9f6c..9df507d36e0 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -202,3 +202,42 @@ void flush_dcache_page(struct page *page)
 	}
 }
 EXPORT_SYMBOL(flush_dcache_page);
+
+/*
+ * Flush an anonymous page so that users of get_user_pages()
+ * can safely access the data. The expected sequence is:
+ *
+ * get_user_pages()
+ * -> flush_anon_page
+ * memcpy() to/from page
+ * if written to page, flush_dcache_page()
+ */
+void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
+{
+ unsigned long pfn;
+
+ /* VIPT non-aliasing caches need do nothing */
+ if (cache_is_vipt_nonaliasing())
+ return;
+
+ /*
+ * Write back and invalidate userspace mapping.
+ */
+ pfn = page_to_pfn(page);
+ if (cache_is_vivt()) {
+ flush_cache_page(vma, vmaddr, pfn);
+ } else {
+ /*
+ * For aliasing VIPT, we can flush an alias of the
+ * userspace address only.
+ */
+ flush_pfn_alias(pfn, vmaddr);
+ }
+
+ /*
+ * Invalidate kernel mapping. No data should be contained
+ * in this mapping of the page. FIXME: this is overkill
+ * since we actually ask for a write-back and invalidate.
+ */
+ __cpuc_flush_dcache_page(page_address(page));
+}
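
The comment above __flush_anon_page() spells out the sequence a get_user_pages() user is expected to follow. As a rough illustration only (not part of this patch: the helper name, locking placement and error handling are assumptions, written against the get_user_pages() signature of this kernel generation), a caller writing into an anonymous user page might look like this:

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>

/*
 * Hypothetical example, not part of the patch: copy 'len' bytes from
 * 'buf' into the user page backing 'vmaddr', following the sequence
 * documented above __flush_anon_page():
 *
 *	get_user_pages() -> flush_anon_page() -> memcpy() -> flush_dcache_page()
 *
 * Assumes 'buf'/'len' fit within a single page.
 */
static int example_write_user_page(unsigned long vmaddr,
				   const void *buf, size_t len)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct page *page;
	void *kaddr;
	int ret;

	down_read(&mm->mmap_sem);
	ret = get_user_pages(current, mm, vmaddr, 1, 1, 0, &page, &vma);
	if (ret <= 0) {
		up_read(&mm->mmap_sem);
		return -EFAULT;
	}

	/* Write back any dirty lines in the userspace mapping first. */
	flush_anon_page(vma, page, vmaddr);

	kaddr = kmap(page);
	memcpy(kaddr + (vmaddr & ~PAGE_MASK), buf, len);
	kunmap(page);

	/* We wrote through the kernel mapping, so flush it for userspace. */
	flush_dcache_page(page);

	set_page_dirty_lock(page);
	page_cache_release(page);
	up_read(&mm->mmap_sem);
	return 0;
}

The flush_anon_page() call is what the new __flush_anon_page() backs on ARM; the trailing flush_dcache_page() corresponds to the "if written to page" step of the documented sequence.
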
diff --git a/include/asm-arm/cacheflush.h b/include/asm-arm/cacheflush.h
index d51049522cd..5f531ea0305 100644
--- a/include/asm-arm/cacheflush.h
+++ b/include/asm-arm/cacheflush.h
@@ -357,6 +357,16 @@ extern void flush_dcache_page(struct page *);
 extern void __flush_dcache_page(struct address_space *mapping, struct page *page);
+#define ARCH_HAS_FLUSH_ANON_PAGE
+static inline void flush_anon_page(struct vm_area_struct *vma,
+ struct page *page, unsigned long vmaddr)
+{
+ extern void __flush_anon_page(struct vm_area_struct *vma,
+ struct page *, unsigned long);
+ if (PageAnon(page))
+ __flush_anon_page(vma, page, vmaddr);
+}
+
 #define flush_dcache_mmap_lock(mapping) \
 	write_lock_irq(&(mapping)->tree_lock)
 #define flush_dcache_mmap_unlock(mapping) \