From d73e60b7144a86baf0fdfcc9537a70bb4f72e11c Mon Sep 17 00:00:00 2001
From: Russell King
Date: Fri, 31 Oct 2008 13:08:02 +0000
Subject: [ARM] copypage: convert assembly files to C

Signed-off-by: Russell King
---
 arch/arm/mm/copypage-v3.c | 69 +++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 69 insertions(+)
 create mode 100644 arch/arm/mm/copypage-v3.c

(limited to 'arch/arm/mm/copypage-v3.c')

diff --git a/arch/arm/mm/copypage-v3.c b/arch/arm/mm/copypage-v3.c
new file mode 100644
index 00000000000..184911089e6
--- /dev/null
+++ b/arch/arm/mm/copypage-v3.c
@@ -0,0 +1,69 @@
+/*
+ * linux/arch/arm/mm/copypage-v3.c
+ *
+ * Copyright (C) 1995-1999 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/init.h>
+
+#include <asm/page.h>
+
+/*
+ * ARMv3 optimised copy_user_page
+ *
+ * FIXME: do we need to handle cache stuff...
+ */
+void __attribute__((naked))
+v3_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
+{
+	asm("\n\
+	stmfd	sp!, {r4, lr}			@ 2\n\
+	mov	r2, %2				@ 1\n\
+	ldmia	%0!, {r3, r4, ip, lr}		@ 4+1\n\
+1:	stmia	%1!, {r3, r4, ip, lr}		@ 4\n\
+	ldmia	%0!, {r3, r4, ip, lr}		@ 4+1\n\
+	stmia	%1!, {r3, r4, ip, lr}		@ 4\n\
+	ldmia	%0!, {r3, r4, ip, lr}		@ 4+1\n\
+	stmia	%1!, {r3, r4, ip, lr}		@ 4\n\
+	ldmia	%0!, {r3, r4, ip, lr}		@ 4\n\
+	subs	r2, r2, #1			@ 1\n\
+	stmia	%1!, {r3, r4, ip, lr}		@ 4\n\
+	ldmneia	%0!, {r3, r4, ip, lr}		@ 4\n\
+	bne	1b				@ 1\n\
+	ldmfd	sp!, {r4, pc}			@ 3"
+	:
+	: "r" (kfrom), "r" (kto), "I" (PAGE_SIZE / 64));
+}
+
+/*
+ * ARMv3 optimised clear_user_page
+ *
+ * FIXME: do we need to handle cache stuff...
+ */
+void __attribute__((naked)) v3_clear_user_page(void *kaddr, unsigned long vaddr)
+{
+	asm("\n\
+	str	lr, [sp, #-4]!\n\
+	mov	r1, %1				@ 1\n\
+	mov	r2, #0				@ 1\n\
+	mov	r3, #0				@ 1\n\
+	mov	ip, #0				@ 1\n\
+	mov	lr, #0				@ 1\n\
+1:	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
+	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
+	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
+	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
+	subs	r1, r1, #1			@ 1\n\
+	bne	1b				@ 1\n\
+	ldr	pc, [sp], #4"
+	:
+	: "r" (kaddr), "I" (PAGE_SIZE / 64));
+}
+
+struct cpu_user_fns v3_user_fns __initdata = {
+	.cpu_clear_user_page	= v3_clear_user_page,
+	.cpu_copy_user_page	= v3_copy_user_page,
+};
-- cgit v1.2.3
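The cpu_user_fns table registered at the end of this patch is the hook the ARM mm code dispatches through when more than one CPU-specific user-page implementation can be built into the kernel. A minimal sketch of that dispatch pattern follows; the struct members are taken from the function signatures in the patch, but the cpu_user variable and the clear_user_page() wrapper are simplified, hypothetical stand-ins rather than the exact asm/page.h glue, which this patch does not show.

	/*
	 * Illustrative sketch only: how a table such as v3_user_fns is consumed.
	 * The member signatures mirror the patch; cpu_user and the wrapper are
	 * simplified stand-ins, not the exact ARM asm/page.h definitions.
	 */
	struct page;

	struct cpu_user_fns {
		void (*cpu_clear_user_page)(void *p, unsigned long user);
		void (*cpu_copy_user_page)(void *to, const void *from,
					   unsigned long user);
	};

	extern struct cpu_user_fns cpu_user;	/* set up for the CPU detected at boot */

	static inline void clear_user_page(void *addr, unsigned long vaddr,
					   struct page *page)
	{
		cpu_user.cpu_clear_user_page(addr, vaddr);
	}

With this shape, boot-time CPU detection only has to point cpu_user at the matching table, and generic mm code always calls through the same two entry points regardless of which ARM core is running.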
From 063b0a4207e43acbeff3d4b09f43e750e0212b48 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Fri, 31 Oct 2008 15:08:35 +0000
Subject: [ARM] copypage: provide our own copy_user_highpage()

We used to override the copy_user_page() function.  However, this is not
only inefficient, it also causes additional complexity for highmem support,
since we convert from a struct page to a kernel direct mapped address and
back to a struct page again.

Moreover, with highmem support, we end up pointlessly setting up kmap
entries for pages which we're going to remap.  So, push the kmapping down
into the copypage implementation files where it's required.

Signed-off-by: Russell King
---
 arch/arm/mm/copypage-v3.c | 23 +++++++++++++++++------
 1 file changed, 17 insertions(+), 6 deletions(-)

(limited to 'arch/arm/mm/copypage-v3.c')

diff --git a/arch/arm/mm/copypage-v3.c b/arch/arm/mm/copypage-v3.c
index 184911089e6..52df8f04d3f 100644
--- a/arch/arm/mm/copypage-v3.c
+++ b/arch/arm/mm/copypage-v3.c
@@ -8,16 +8,15 @@
  * published by the Free Software Foundation.
  */
 #include <linux/init.h>
-
-#include <asm/page.h>
+#include <linux/highmem.h>
 
 /*
- * ARMv3 optimised copy_user_page
+ * ARMv3 optimised copy_user_highpage
 *
 * FIXME: do we need to handle cache stuff...
 */
-void __attribute__((naked))
-v3_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
+static void __attribute__((naked))
+v3_copy_user_page(void *kto, const void *kfrom)
 {
 	asm("\n\
 	stmfd	sp!, {r4, lr}			@ 2\n\
@@ -38,6 +37,18 @@ v3_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
 	: "r" (kfrom), "r" (kto), "I" (PAGE_SIZE / 64));
 }
 
+void v3_copy_user_highpage(struct page *to, struct page *from,
+	unsigned long vaddr)
+{
+	void *kto, *kfrom;
+
+	kto = kmap_atomic(to, KM_USER0);
+	kfrom = kmap_atomic(from, KM_USER1);
+	v3_copy_user_page(kto, kfrom);
+	kunmap_atomic(kfrom, KM_USER1);
+	kunmap_atomic(kto, KM_USER0);
+}
+
 /*
 * ARMv3 optimised clear_user_page
 *
@@ -65,5 +76,5 @@ void __attribute__((naked)) v3_clear_user_page(void *kaddr, unsigned long vaddr)
 
 struct cpu_user_fns v3_user_fns __initdata = {
 	.cpu_clear_user_page	= v3_clear_user_page,
-	.cpu_copy_user_page	= v3_copy_user_page,
+	.cpu_copy_user_highpage	= v3_copy_user_highpage,
 };
-- cgit v1.2.3
From 303c6443659bc1dc911356f5de149f48ff1d97b8 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Fri, 31 Oct 2008 16:32:19 +0000
Subject: [ARM] clearpage: provide our own clear_user_highpage()

For similar reasons as copy_user_page(), we want to avoid the additional
kmap_atomic if it's unnecessary.

Signed-off-by: Russell King
---
 arch/arm/mm/copypage-v3.c | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

(limited to 'arch/arm/mm/copypage-v3.c')

diff --git a/arch/arm/mm/copypage-v3.c b/arch/arm/mm/copypage-v3.c
index 52df8f04d3f..13ce0baa6ba 100644
--- a/arch/arm/mm/copypage-v3.c
+++ b/arch/arm/mm/copypage-v3.c
@@ -54,10 +54,10 @@ void v3_copy_user_highpage(struct page *to, struct page *from,
 *
 * FIXME: do we need to handle cache stuff...
 */
-void __attribute__((naked)) v3_clear_user_page(void *kaddr, unsigned long vaddr)
+void v3_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
+	void *kaddr = kmap_atomic(page, KM_USER0);
 	asm("\n\
-	str	lr, [sp, #-4]!\n\
 	mov	r1, %1				@ 1\n\
 	mov	r2, #0				@ 1\n\
 	mov	r3, #0				@ 1\n\
 	mov	ip, #0				@ 1\n\
@@ -68,13 +68,14 @@ void __attribute__((naked)) v3_clear_user_page(void *kaddr, unsigned long vaddr)
 	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
 	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
 	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
 	subs	r1, r1, #1			@ 1\n\
-	bne	1b				@ 1\n\
-	ldr	pc, [sp], #4"
+	bne	1b				@ 1"
 	:
-	: "r" (kaddr), "I" (PAGE_SIZE / 64));
+	: "r" (kaddr), "I" (PAGE_SIZE / 64)
+	: "r1", "r2", "r3", "ip", "lr");
+	kunmap_atomic(kaddr, KM_USER0);
 }
 
 struct cpu_user_fns v3_user_fns __initdata = {
-	.cpu_clear_user_page	= v3_clear_user_page,
+	.cpu_clear_user_highpage = v3_clear_user_highpage,
 	.cpu_copy_user_highpage	= v3_copy_user_highpage,
 };
-- cgit v1.2.3
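Both of the patches above push the same pattern down into the copypage implementation: the function now receives struct page pointers, performs its own atomic kmaps, and drops them in reverse order before returning. As a hedged illustration of that shape, a generic (non-optimised) copy_user_highpage built on the same 2008-era kmap_atomic(page, KM_xxx) API, with copy_page() standing in for the hand-written ARMv3 loop, would look roughly like this; the function name is hypothetical:

	/*
	 * Sketch only: the generic shape that v3_copy_user_highpage() specialises.
	 * Atomic kmaps are per-CPU and are released in reverse (LIFO) order;
	 * KM_USER0/KM_USER1 are the per-CPU slots used here.
	 */
	#include <linux/highmem.h>

	static void example_copy_user_highpage(struct page *to, struct page *from,
					       unsigned long vaddr)
	{
		void *kto = kmap_atomic(to, KM_USER0);
		void *kfrom = kmap_atomic(from, KM_USER1);

		copy_page(kto, kfrom);		/* v3 substitutes its ldm/stm loop here */

		kunmap_atomic(kfrom, KM_USER1);
		kunmap_atomic(kto, KM_USER0);
	}

On kernels built without highmem, kmap_atomic() is essentially a page_address() lookup, so pushing the mapping into each implementation costs lowmem-only configurations nothing while letting highmem kernels skip the redundant kmap the old copy_user_page() path required.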
From 43ae286b7d4d8c4983bc263ef2e3cccc10dedb2b Mon Sep 17 00:00:00 2001
From: Nicolas Pitre
Date: Tue, 4 Nov 2008 02:42:27 -0500
Subject: [ARM] fix a couple clear_user_highpage assembly constraints

In all cases the kaddr is assigned an input register even though it is
modified in the assembly code.  Let's assign a new variable to the modified
value and mark those inline asm with volatile otherwise they get optimized
away because the output variable is otherwise not used.

Also fix a few conversion errors in copypage-feroceon.c and copypage-v4mc.c.

Signed-off-by: Nicolas Pitre
Signed-off-by: Russell King
---
 arch/arm/mm/copypage-v3.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

(limited to 'arch/arm/mm/copypage-v3.c')

diff --git a/arch/arm/mm/copypage-v3.c b/arch/arm/mm/copypage-v3.c
index 13ce0baa6ba..70ed96c8af8 100644
--- a/arch/arm/mm/copypage-v3.c
+++ b/arch/arm/mm/copypage-v3.c
@@ -56,9 +56,9 @@ void v3_clear_user_highpage(struct page *page, unsigned long vaddr)
 */
 void v3_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
-	void *kaddr = kmap_atomic(page, KM_USER0);
-	asm("\n\
-	mov	r1, %1				@ 1\n\
+	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+	asm volatile("\n\
+	mov	r1, %2				@ 1\n\
 	mov	r2, #0				@ 1\n\
 	mov	r3, #0				@ 1\n\
 	mov	ip, #0				@ 1\n\
@@ -69,8 +69,8 @@ void v3_clear_user_highpage(struct page *page, unsigned long vaddr)
 	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
 	subs	r1, r1, #1			@ 1\n\
 	bne	1b				@ 1"
-	:
-	: "r" (kaddr), "I" (PAGE_SIZE / 64)
+	: "=r" (ptr)
+	: "0" (kaddr), "I" (PAGE_SIZE / 64)
 	: "r1", "r2", "r3", "ip", "lr");
 	kunmap_atomic(kaddr, KM_USER0);
 }
-- cgit v1.2.3
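The constraint change in this last patch is the standard GCC inline-asm idiom for an operand that the asm block itself advances: the modified pointer becomes an output ("=r"), its initial value is tied to that output with the matching-digit constraint ("0"), and the statement is marked volatile so it survives even though the output is never read. A stand-alone, ARM-only illustration of the same idiom (hypothetical helper, not taken from the patch):

	/*
	 * Illustration only: %0 is post-incremented by the stmia instructions,
	 * so it must be declared as an output; "0" seeds it from kaddr.  Without
	 * "volatile", GCC could delete the asm because 'ptr' is never used.
	 */
	static void zero_32_bytes(void *kaddr)
	{
		void *ptr;

		asm volatile(
		"	mov	r2, #0\n"
		"	mov	r3, #0\n"
		"	stmia	%0!, {r2, r3}\n"	/* stores 8 bytes, advances %0 */
		"	stmia	%0!, {r2, r3}\n"
		"	stmia	%0!, {r2, r3}\n"
		"	stmia	%0!, {r2, r3}\n"
		: "=r" (ptr)			/* the modified pointer is an output... */
		: "0" (kaddr)			/* ...initialised from the input value */
		: "r2", "r3", "memory");
	}

Passing kaddr as a plain "r" input, as the code did before this fix, tells GCC that the register still holds the original kaddr after the asm completes, which is exactly the wrong assumption the patch removes.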