From 322ae8eb91c1730728400c5b8dd1108aef1205b8 Mon Sep 17 00:00:00 2001 From: Michal Simek Date: Fri, 27 Mar 2009 14:25:21 +0100 Subject: microblaze_v8: supported function for memory - kernel/lib Reviewed-by: Ingo Molnar Acked-by: Stephen Neuendorffer Acked-by: John Linn Acked-by: John Williams Signed-off-by: Michal Simek --- arch/microblaze/lib/fastcopy.S | 662 +++++++++++++++++++++++++++++++++++++++++ arch/microblaze/lib/memcpy.c | 161 ++++++++++ arch/microblaze/lib/memmove.c | 175 +++++++++++ arch/microblaze/lib/memset.c | 82 +++++ 4 files changed, 1080 insertions(+) create mode 100644 arch/microblaze/lib/fastcopy.S create mode 100644 arch/microblaze/lib/memcpy.c create mode 100644 arch/microblaze/lib/memmove.c create mode 100644 arch/microblaze/lib/memset.c (limited to 'arch/microblaze/lib') diff --git a/arch/microblaze/lib/fastcopy.S b/arch/microblaze/lib/fastcopy.S new file mode 100644 index 00000000000..02e3ab4eddf --- /dev/null +++ b/arch/microblaze/lib/fastcopy.S @@ -0,0 +1,662 @@ +/* + * Copyright (C) 2008-2009 Michal Simek + * Copyright (C) 2008-2009 PetaLogix + * Copyright (C) 2008 Jim Law - Iris LP All rights reserved. + * + * This file is subject to the terms and conditions of the GNU General + * Public License. See the file COPYING in the main directory of this + * archive for more details. + * + * Written by Jim Law + * + * intended to replace: + * memcpy in memcpy.c and + * memmove in memmove.c + * ... in arch/microblaze/lib + * + * + * assly_fastcopy.S + * + * Attempt at quicker memcpy and memmove for MicroBlaze + * Input : Operand1 in Reg r5 - destination address + * Operand2 in Reg r6 - source address + * Operand3 in Reg r7 - number of bytes to transfer + * Output: Result in Reg r3 - starting destinaition address + * + * + * Explanation: + * Perform (possibly unaligned) copy of a block of memory + * between mem locations with size of xfer spec'd in bytes + */ + +#include + + .globl memcpy + .ent memcpy + +memcpy: +fast_memcpy_ascending: + /* move d to return register as value of function */ + addi r3, r5, 0 + + addi r4, r0, 4 /* n = 4 */ + cmpu r4, r4, r7 /* n = c - n (unsigned) */ + blti r4, a_xfer_end /* if n < 0, less than one word to transfer */ + + /* transfer first 0~3 bytes to get aligned dest address */ + andi r4, r5, 3 /* n = d & 3 */ + /* if zero, destination already aligned */ + beqi r4, a_dalign_done + /* n = 4 - n (yields 3, 2, 1 transfers for 1, 2, 3 addr offset) */ + rsubi r4, r4, 4 + rsub r7, r4, r7 /* c = c - n adjust c */ + +a_xfer_first_loop: + /* if no bytes left to transfer, transfer the bulk */ + beqi r4, a_dalign_done + lbui r11, r6, 0 /* h = *s */ + sbi r11, r5, 0 /* *d = h */ + addi r6, r6, 1 /* s++ */ + addi r5, r5, 1 /* d++ */ + brid a_xfer_first_loop /* loop */ + addi r4, r4, -1 /* n-- (IN DELAY SLOT) */ + +a_dalign_done: + addi r4, r0, 32 /* n = 32 */ + cmpu r4, r4, r7 /* n = c - n (unsigned) */ + /* if n < 0, less than one block to transfer */ + blti r4, a_block_done + +a_block_xfer: + andi r4, r7, 0xffffffe0 /* n = c & ~31 */ + rsub r7, r4, r7 /* c = c - n */ + + andi r9, r6, 3 /* t1 = s & 3 */ + /* if temp != 0, unaligned transfers needed */ + bnei r9, a_block_unaligned + +a_block_aligned: + lwi r9, r6, 0 /* t1 = *(s + 0) */ + lwi r10, r6, 4 /* t2 = *(s + 4) */ + lwi r11, r6, 8 /* t3 = *(s + 8) */ + lwi r12, r6, 12 /* t4 = *(s + 12) */ + swi r9, r5, 0 /* *(d + 0) = t1 */ + swi r10, r5, 4 /* *(d + 4) = t2 */ + swi r11, r5, 8 /* *(d + 8) = t3 */ + swi r12, r5, 12 /* *(d + 12) = t4 */ + lwi r9, r6, 16 /* t1 = *(s + 16) */ + lwi r10, r6, 20 /* 
t2 = *(s + 20) */ + lwi r11, r6, 24 /* t3 = *(s + 24) */ + lwi r12, r6, 28 /* t4 = *(s + 28) */ + swi r9, r5, 16 /* *(d + 16) = t1 */ + swi r10, r5, 20 /* *(d + 20) = t2 */ + swi r11, r5, 24 /* *(d + 24) = t3 */ + swi r12, r5, 28 /* *(d + 28) = t4 */ + addi r6, r6, 32 /* s = s + 32 */ + addi r4, r4, -32 /* n = n - 32 */ + bneid r4, a_block_aligned /* while (n) loop */ + addi r5, r5, 32 /* d = d + 32 (IN DELAY SLOT) */ + bri a_block_done + +a_block_unaligned: + andi r8, r6, 0xfffffffc /* as = s & ~3 */ + add r6, r6, r4 /* s = s + n */ + lwi r11, r8, 0 /* h = *(as + 0) */ + + addi r9, r9, -1 + beqi r9, a_block_u1 /* t1 was 1 => 1 byte offset */ + addi r9, r9, -1 + beqi r9, a_block_u2 /* t1 was 2 => 2 byte offset */ + +a_block_u3: + bslli r11, r11, 24 /* h = h << 24 */ +a_bu3_loop: + lwi r12, r8, 4 /* v = *(as + 4) */ + bsrli r9, r12, 8 /* t1 = v >> 8 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 0 /* *(d + 0) = t1 */ + bslli r11, r12, 24 /* h = v << 24 */ + lwi r12, r8, 8 /* v = *(as + 8) */ + bsrli r9, r12, 8 /* t1 = v >> 8 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 4 /* *(d + 4) = t1 */ + bslli r11, r12, 24 /* h = v << 24 */ + lwi r12, r8, 12 /* v = *(as + 12) */ + bsrli r9, r12, 8 /* t1 = v >> 8 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 8 /* *(d + 8) = t1 */ + bslli r11, r12, 24 /* h = v << 24 */ + lwi r12, r8, 16 /* v = *(as + 16) */ + bsrli r9, r12, 8 /* t1 = v >> 8 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 12 /* *(d + 12) = t1 */ + bslli r11, r12, 24 /* h = v << 24 */ + lwi r12, r8, 20 /* v = *(as + 20) */ + bsrli r9, r12, 8 /* t1 = v >> 8 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 16 /* *(d + 16) = t1 */ + bslli r11, r12, 24 /* h = v << 24 */ + lwi r12, r8, 24 /* v = *(as + 24) */ + bsrli r9, r12, 8 /* t1 = v >> 8 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 20 /* *(d + 20) = t1 */ + bslli r11, r12, 24 /* h = v << 24 */ + lwi r12, r8, 28 /* v = *(as + 28) */ + bsrli r9, r12, 8 /* t1 = v >> 8 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 24 /* *(d + 24) = t1 */ + bslli r11, r12, 24 /* h = v << 24 */ + lwi r12, r8, 32 /* v = *(as + 32) */ + bsrli r9, r12, 8 /* t1 = v >> 8 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 28 /* *(d + 28) = t1 */ + bslli r11, r12, 24 /* h = v << 24 */ + addi r8, r8, 32 /* as = as + 32 */ + addi r4, r4, -32 /* n = n - 32 */ + bneid r4, a_bu3_loop /* while (n) loop */ + addi r5, r5, 32 /* d = d + 32 (IN DELAY SLOT) */ + bri a_block_done + +a_block_u1: + bslli r11, r11, 8 /* h = h << 8 */ +a_bu1_loop: + lwi r12, r8, 4 /* v = *(as + 4) */ + bsrli r9, r12, 24 /* t1 = v >> 24 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 0 /* *(d + 0) = t1 */ + bslli r11, r12, 8 /* h = v << 8 */ + lwi r12, r8, 8 /* v = *(as + 8) */ + bsrli r9, r12, 24 /* t1 = v >> 24 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 4 /* *(d + 4) = t1 */ + bslli r11, r12, 8 /* h = v << 8 */ + lwi r12, r8, 12 /* v = *(as + 12) */ + bsrli r9, r12, 24 /* t1 = v >> 24 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 8 /* *(d + 8) = t1 */ + bslli r11, r12, 8 /* h = v << 8 */ + lwi r12, r8, 16 /* v = *(as + 16) */ + bsrli r9, r12, 24 /* t1 = v >> 24 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 12 /* *(d + 12) = t1 */ + bslli r11, r12, 8 /* h = v << 8 */ + lwi r12, r8, 20 /* v = *(as + 20) */ + bsrli r9, r12, 24 /* t1 = v >> 24 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 16 /* *(d + 16) = t1 */ + bslli r11, r12, 8 /* h = v << 8 */ + lwi r12, r8, 24 /* v = *(as + 24) */ + bsrli r9, r12, 24 /* t1 = v >> 24 */ + or r9, 
r11, r9 /* t1 = h | t1 */ + swi r9, r5, 20 /* *(d + 20) = t1 */ + bslli r11, r12, 8 /* h = v << 8 */ + lwi r12, r8, 28 /* v = *(as + 28) */ + bsrli r9, r12, 24 /* t1 = v >> 24 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 24 /* *(d + 24) = t1 */ + bslli r11, r12, 8 /* h = v << 8 */ + lwi r12, r8, 32 /* v = *(as + 32) */ + bsrli r9, r12, 24 /* t1 = v >> 24 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 28 /* *(d + 28) = t1 */ + bslli r11, r12, 8 /* h = v << 8 */ + addi r8, r8, 32 /* as = as + 32 */ + addi r4, r4, -32 /* n = n - 32 */ + bneid r4, a_bu1_loop /* while (n) loop */ + addi r5, r5, 32 /* d = d + 32 (IN DELAY SLOT) */ + bri a_block_done + +a_block_u2: + bslli r11, r11, 16 /* h = h << 16 */ +a_bu2_loop: + lwi r12, r8, 4 /* v = *(as + 4) */ + bsrli r9, r12, 16 /* t1 = v >> 16 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 0 /* *(d + 0) = t1 */ + bslli r11, r12, 16 /* h = v << 16 */ + lwi r12, r8, 8 /* v = *(as + 8) */ + bsrli r9, r12, 16 /* t1 = v >> 16 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 4 /* *(d + 4) = t1 */ + bslli r11, r12, 16 /* h = v << 16 */ + lwi r12, r8, 12 /* v = *(as + 12) */ + bsrli r9, r12, 16 /* t1 = v >> 16 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 8 /* *(d + 8) = t1 */ + bslli r11, r12, 16 /* h = v << 16 */ + lwi r12, r8, 16 /* v = *(as + 16) */ + bsrli r9, r12, 16 /* t1 = v >> 16 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 12 /* *(d + 12) = t1 */ + bslli r11, r12, 16 /* h = v << 16 */ + lwi r12, r8, 20 /* v = *(as + 20) */ + bsrli r9, r12, 16 /* t1 = v >> 16 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 16 /* *(d + 16) = t1 */ + bslli r11, r12, 16 /* h = v << 16 */ + lwi r12, r8, 24 /* v = *(as + 24) */ + bsrli r9, r12, 16 /* t1 = v >> 16 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 20 /* *(d + 20) = t1 */ + bslli r11, r12, 16 /* h = v << 16 */ + lwi r12, r8, 28 /* v = *(as + 28) */ + bsrli r9, r12, 16 /* t1 = v >> 16 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 24 /* *(d + 24) = t1 */ + bslli r11, r12, 16 /* h = v << 16 */ + lwi r12, r8, 32 /* v = *(as + 32) */ + bsrli r9, r12, 16 /* t1 = v >> 16 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 28 /* *(d + 28) = t1 */ + bslli r11, r12, 16 /* h = v << 16 */ + addi r8, r8, 32 /* as = as + 32 */ + addi r4, r4, -32 /* n = n - 32 */ + bneid r4, a_bu2_loop /* while (n) loop */ + addi r5, r5, 32 /* d = d + 32 (IN DELAY SLOT) */ + +a_block_done: + addi r4, r0, 4 /* n = 4 */ + cmpu r4, r4, r7 /* n = c - n (unsigned) */ + blti r4, a_xfer_end /* if n < 0, less than one word to transfer */ + +a_word_xfer: + andi r4, r7, 0xfffffffc /* n = c & ~3 */ + addi r10, r0, 0 /* offset = 0 */ + + andi r9, r6, 3 /* t1 = s & 3 */ + /* if temp != 0, unaligned transfers needed */ + bnei r9, a_word_unaligned + +a_word_aligned: + lw r9, r6, r10 /* t1 = *(s+offset) */ + sw r9, r5, r10 /* *(d+offset) = t1 */ + addi r4, r4,-4 /* n-- */ + bneid r4, a_word_aligned /* loop */ + addi r10, r10, 4 /* offset++ (IN DELAY SLOT) */ + + bri a_word_done + +a_word_unaligned: + andi r8, r6, 0xfffffffc /* as = s & ~3 */ + lwi r11, r8, 0 /* h = *(as + 0) */ + addi r8, r8, 4 /* as = as + 4 */ + + addi r9, r9, -1 + beqi r9, a_word_u1 /* t1 was 1 => 1 byte offset */ + addi r9, r9, -1 + beqi r9, a_word_u2 /* t1 was 2 => 2 byte offset */ + +a_word_u3: + bslli r11, r11, 24 /* h = h << 24 */ +a_wu3_loop: + lw r12, r8, r10 /* v = *(as + offset) */ + bsrli r9, r12, 8 /* t1 = v >> 8 */ + or r9, r11, r9 /* t1 = h | t1 */ + sw r9, r5, r10 /* *(d + offset) = t1 */ + bslli r11, r12, 24 /* h = v << 24 */ + 
addi r4, r4,-4 /* n = n - 4 */ + bneid r4, a_wu3_loop /* while (n) loop */ + addi r10, r10, 4 /* offset = ofset + 4 (IN DELAY SLOT) */ + + bri a_word_done + +a_word_u1: + bslli r11, r11, 8 /* h = h << 8 */ +a_wu1_loop: + lw r12, r8, r10 /* v = *(as + offset) */ + bsrli r9, r12, 24 /* t1 = v >> 24 */ + or r9, r11, r9 /* t1 = h | t1 */ + sw r9, r5, r10 /* *(d + offset) = t1 */ + bslli r11, r12, 8 /* h = v << 8 */ + addi r4, r4,-4 /* n = n - 4 */ + bneid r4, a_wu1_loop /* while (n) loop */ + addi r10, r10, 4 /* offset = ofset + 4 (IN DELAY SLOT) */ + + bri a_word_done + +a_word_u2: + bslli r11, r11, 16 /* h = h << 16 */ +a_wu2_loop: + lw r12, r8, r10 /* v = *(as + offset) */ + bsrli r9, r12, 16 /* t1 = v >> 16 */ + or r9, r11, r9 /* t1 = h | t1 */ + sw r9, r5, r10 /* *(d + offset) = t1 */ + bslli r11, r12, 16 /* h = v << 16 */ + addi r4, r4,-4 /* n = n - 4 */ + bneid r4, a_wu2_loop /* while (n) loop */ + addi r10, r10, 4 /* offset = ofset + 4 (IN DELAY SLOT) */ + +a_word_done: + add r5, r5, r10 /* d = d + offset */ + add r6, r6, r10 /* s = s + offset */ + rsub r7, r10, r7 /* c = c - offset */ + +a_xfer_end: +a_xfer_end_loop: + beqi r7, a_done /* while (c) */ + lbui r9, r6, 0 /* t1 = *s */ + addi r6, r6, 1 /* s++ */ + sbi r9, r5, 0 /* *d = t1 */ + addi r7, r7, -1 /* c-- */ + brid a_xfer_end_loop /* loop */ + addi r5, r5, 1 /* d++ (IN DELAY SLOT) */ + +a_done: + rtsd r15, 8 + nop + +.end memcpy +/*----------------------------------------------------------------------------*/ + .globl memmove + .ent memmove + +memmove: + cmpu r4, r5, r6 /* n = s - d */ + bgei r4,fast_memcpy_ascending + +fast_memcpy_descending: + /* move d to return register as value of function */ + addi r3, r5, 0 + + add r5, r5, r7 /* d = d + c */ + add r6, r6, r7 /* s = s + c */ + + addi r4, r0, 4 /* n = 4 */ + cmpu r4, r4, r7 /* n = c - n (unsigned) */ + blti r4,d_xfer_end /* if n < 0, less than one word to transfer */ + + /* transfer first 0~3 bytes to get aligned dest address */ + andi r4, r5, 3 /* n = d & 3 */ + /* if zero, destination already aligned */ + beqi r4,d_dalign_done + rsub r7, r4, r7 /* c = c - n adjust c */ + +d_xfer_first_loop: + /* if no bytes left to transfer, transfer the bulk */ + beqi r4,d_dalign_done + addi r6, r6, -1 /* s-- */ + addi r5, r5, -1 /* d-- */ + lbui r11, r6, 0 /* h = *s */ + sbi r11, r5, 0 /* *d = h */ + brid d_xfer_first_loop /* loop */ + addi r4, r4, -1 /* n-- (IN DELAY SLOT) */ + +d_dalign_done: + addi r4, r0, 32 /* n = 32 */ + cmpu r4, r4, r7 /* n = c - n (unsigned) */ + /* if n < 0, less than one block to transfer */ + blti r4, d_block_done + +d_block_xfer: + andi r4, r7, 0xffffffe0 /* n = c & ~31 */ + rsub r7, r4, r7 /* c = c - n */ + + andi r9, r6, 3 /* t1 = s & 3 */ + /* if temp != 0, unaligned transfers needed */ + bnei r9, d_block_unaligned + +d_block_aligned: + addi r6, r6, -32 /* s = s - 32 */ + addi r5, r5, -32 /* d = d - 32 */ + lwi r9, r6, 28 /* t1 = *(s + 28) */ + lwi r10, r6, 24 /* t2 = *(s + 24) */ + lwi r11, r6, 20 /* t3 = *(s + 20) */ + lwi r12, r6, 16 /* t4 = *(s + 16) */ + swi r9, r5, 28 /* *(d + 28) = t1 */ + swi r10, r5, 24 /* *(d + 24) = t2 */ + swi r11, r5, 20 /* *(d + 20) = t3 */ + swi r12, r5, 16 /* *(d + 16) = t4 */ + lwi r9, r6, 12 /* t1 = *(s + 12) */ + lwi r10, r6, 8 /* t2 = *(s + 8) */ + lwi r11, r6, 4 /* t3 = *(s + 4) */ + lwi r12, r6, 0 /* t4 = *(s + 0) */ + swi r9, r5, 12 /* *(d + 12) = t1 */ + swi r10, r5, 8 /* *(d + 8) = t2 */ + swi r11, r5, 4 /* *(d + 4) = t3 */ + addi r4, r4, -32 /* n = n - 32 */ + bneid r4, d_block_aligned /* while (n) loop */ + swi 
r12, r5, 0 /* *(d + 0) = t4 (IN DELAY SLOT) */ + bri d_block_done + +d_block_unaligned: + andi r8, r6, 0xfffffffc /* as = s & ~3 */ + rsub r6, r4, r6 /* s = s - n */ + lwi r11, r8, 0 /* h = *(as + 0) */ + + addi r9, r9, -1 + beqi r9,d_block_u1 /* t1 was 1 => 1 byte offset */ + addi r9, r9, -1 + beqi r9,d_block_u2 /* t1 was 2 => 2 byte offset */ + +d_block_u3: + bsrli r11, r11, 8 /* h = h >> 8 */ +d_bu3_loop: + addi r8, r8, -32 /* as = as - 32 */ + addi r5, r5, -32 /* d = d - 32 */ + lwi r12, r8, 28 /* v = *(as + 28) */ + bslli r9, r12, 24 /* t1 = v << 24 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 28 /* *(d + 28) = t1 */ + bsrli r11, r12, 8 /* h = v >> 8 */ + lwi r12, r8, 24 /* v = *(as + 24) */ + bslli r9, r12, 24 /* t1 = v << 24 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 24 /* *(d + 24) = t1 */ + bsrli r11, r12, 8 /* h = v >> 8 */ + lwi r12, r8, 20 /* v = *(as + 20) */ + bslli r9, r12, 24 /* t1 = v << 24 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 20 /* *(d + 20) = t1 */ + bsrli r11, r12, 8 /* h = v >> 8 */ + lwi r12, r8, 16 /* v = *(as + 16) */ + bslli r9, r12, 24 /* t1 = v << 24 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 16 /* *(d + 16) = t1 */ + bsrli r11, r12, 8 /* h = v >> 8 */ + lwi r12, r8, 12 /* v = *(as + 12) */ + bslli r9, r12, 24 /* t1 = v << 24 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 12 /* *(d + 112) = t1 */ + bsrli r11, r12, 8 /* h = v >> 8 */ + lwi r12, r8, 8 /* v = *(as + 8) */ + bslli r9, r12, 24 /* t1 = v << 24 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 8 /* *(d + 8) = t1 */ + bsrli r11, r12, 8 /* h = v >> 8 */ + lwi r12, r8, 4 /* v = *(as + 4) */ + bslli r9, r12, 24 /* t1 = v << 24 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 4 /* *(d + 4) = t1 */ + bsrli r11, r12, 8 /* h = v >> 8 */ + lwi r12, r8, 0 /* v = *(as + 0) */ + bslli r9, r12, 24 /* t1 = v << 24 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 0 /* *(d + 0) = t1 */ + addi r4, r4, -32 /* n = n - 32 */ + bneid r4, d_bu3_loop /* while (n) loop */ + bsrli r11, r12, 8 /* h = v >> 8 (IN DELAY SLOT) */ + bri d_block_done + +d_block_u1: + bsrli r11, r11, 24 /* h = h >> 24 */ +d_bu1_loop: + addi r8, r8, -32 /* as = as - 32 */ + addi r5, r5, -32 /* d = d - 32 */ + lwi r12, r8, 28 /* v = *(as + 28) */ + bslli r9, r12, 8 /* t1 = v << 8 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 28 /* *(d + 28) = t1 */ + bsrli r11, r12, 24 /* h = v >> 24 */ + lwi r12, r8, 24 /* v = *(as + 24) */ + bslli r9, r12, 8 /* t1 = v << 8 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 24 /* *(d + 24) = t1 */ + bsrli r11, r12, 24 /* h = v >> 24 */ + lwi r12, r8, 20 /* v = *(as + 20) */ + bslli r9, r12, 8 /* t1 = v << 8 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 20 /* *(d + 20) = t1 */ + bsrli r11, r12, 24 /* h = v >> 24 */ + lwi r12, r8, 16 /* v = *(as + 16) */ + bslli r9, r12, 8 /* t1 = v << 8 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 16 /* *(d + 16) = t1 */ + bsrli r11, r12, 24 /* h = v >> 24 */ + lwi r12, r8, 12 /* v = *(as + 12) */ + bslli r9, r12, 8 /* t1 = v << 8 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 12 /* *(d + 112) = t1 */ + bsrli r11, r12, 24 /* h = v >> 24 */ + lwi r12, r8, 8 /* v = *(as + 8) */ + bslli r9, r12, 8 /* t1 = v << 8 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 8 /* *(d + 8) = t1 */ + bsrli r11, r12, 24 /* h = v >> 24 */ + lwi r12, r8, 4 /* v = *(as + 4) */ + bslli r9, r12, 8 /* t1 = v << 8 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 4 /* *(d + 4) = t1 */ + bsrli r11, r12, 24 /* h = v >> 24 */ + lwi r12, 
r8, 0 /* v = *(as + 0) */ + bslli r9, r12, 8 /* t1 = v << 8 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 0 /* *(d + 0) = t1 */ + addi r4, r4, -32 /* n = n - 32 */ + bneid r4, d_bu1_loop /* while (n) loop */ + bsrli r11, r12, 24 /* h = v >> 24 (IN DELAY SLOT) */ + bri d_block_done + +d_block_u2: + bsrli r11, r11, 16 /* h = h >> 16 */ +d_bu2_loop: + addi r8, r8, -32 /* as = as - 32 */ + addi r5, r5, -32 /* d = d - 32 */ + lwi r12, r8, 28 /* v = *(as + 28) */ + bslli r9, r12, 16 /* t1 = v << 16 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 28 /* *(d + 28) = t1 */ + bsrli r11, r12, 16 /* h = v >> 16 */ + lwi r12, r8, 24 /* v = *(as + 24) */ + bslli r9, r12, 16 /* t1 = v << 16 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 24 /* *(d + 24) = t1 */ + bsrli r11, r12, 16 /* h = v >> 16 */ + lwi r12, r8, 20 /* v = *(as + 20) */ + bslli r9, r12, 16 /* t1 = v << 16 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 20 /* *(d + 20) = t1 */ + bsrli r11, r12, 16 /* h = v >> 16 */ + lwi r12, r8, 16 /* v = *(as + 16) */ + bslli r9, r12, 16 /* t1 = v << 16 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 16 /* *(d + 16) = t1 */ + bsrli r11, r12, 16 /* h = v >> 16 */ + lwi r12, r8, 12 /* v = *(as + 12) */ + bslli r9, r12, 16 /* t1 = v << 16 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 12 /* *(d + 112) = t1 */ + bsrli r11, r12, 16 /* h = v >> 16 */ + lwi r12, r8, 8 /* v = *(as + 8) */ + bslli r9, r12, 16 /* t1 = v << 16 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 8 /* *(d + 8) = t1 */ + bsrli r11, r12, 16 /* h = v >> 16 */ + lwi r12, r8, 4 /* v = *(as + 4) */ + bslli r9, r12, 16 /* t1 = v << 16 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 4 /* *(d + 4) = t1 */ + bsrli r11, r12, 16 /* h = v >> 16 */ + lwi r12, r8, 0 /* v = *(as + 0) */ + bslli r9, r12, 16 /* t1 = v << 16 */ + or r9, r11, r9 /* t1 = h | t1 */ + swi r9, r5, 0 /* *(d + 0) = t1 */ + addi r4, r4, -32 /* n = n - 32 */ + bneid r4, d_bu2_loop /* while (n) loop */ + bsrli r11, r12, 16 /* h = v >> 16 (IN DELAY SLOT) */ + +d_block_done: + addi r4, r0, 4 /* n = 4 */ + cmpu r4, r4, r7 /* n = c - n (unsigned) */ + blti r4,d_xfer_end /* if n < 0, less than one word to transfer */ + +d_word_xfer: + andi r4, r7, 0xfffffffc /* n = c & ~3 */ + rsub r5, r4, r5 /* d = d - n */ + rsub r6, r4, r6 /* s = s - n */ + rsub r7, r4, r7 /* c = c - n */ + + andi r9, r6, 3 /* t1 = s & 3 */ + /* if temp != 0, unaligned transfers needed */ + bnei r9, d_word_unaligned + +d_word_aligned: + addi r4, r4,-4 /* n-- */ + lw r9, r6, r4 /* t1 = *(s+n) */ + bneid r4, d_word_aligned /* loop */ + sw r9, r5, r4 /* *(d+n) = t1 (IN DELAY SLOT) */ + + bri d_word_done + +d_word_unaligned: + andi r8, r6, 0xfffffffc /* as = s & ~3 */ + lw r11, r8, r4 /* h = *(as + n) */ + + addi r9, r9, -1 + beqi r9,d_word_u1 /* t1 was 1 => 1 byte offset */ + addi r9, r9, -1 + beqi r9,d_word_u2 /* t1 was 2 => 2 byte offset */ + +d_word_u3: + bsrli r11, r11, 8 /* h = h >> 8 */ +d_wu3_loop: + addi r4, r4,-4 /* n = n - 4 */ + lw r12, r8, r4 /* v = *(as + n) */ + bslli r9, r12, 24 /* t1 = v << 24 */ + or r9, r11, r9 /* t1 = h | t1 */ + sw r9, r5, r4 /* *(d + n) = t1 */ + bneid r4, d_wu3_loop /* while (n) loop */ + bsrli r11, r12, 8 /* h = v >> 8 (IN DELAY SLOT) */ + + bri d_word_done + +d_word_u1: + bsrli r11, r11, 24 /* h = h >> 24 */ +d_wu1_loop: + addi r4, r4,-4 /* n = n - 4 */ + lw r12, r8, r4 /* v = *(as + n) */ + bslli r9, r12, 8 /* t1 = v << 8 */ + or r9, r11, r9 /* t1 = h | t1 */ + sw r9, r5, r4 /* *(d + n) = t1 */ + bneid r4, d_wu1_loop /* while (n) loop */ + 
bsrli r11, r12, 24 /* h = v >> 24 (IN DELAY SLOT) */ + + bri d_word_done + +d_word_u2: + bsrli r11, r11, 16 /* h = h >> 16 */ +d_wu2_loop: + addi r4, r4,-4 /* n = n - 4 */ + lw r12, r8, r4 /* v = *(as + n) */ + bslli r9, r12, 16 /* t1 = v << 16 */ + or r9, r11, r9 /* t1 = h | t1 */ + sw r9, r5, r4 /* *(d + n) = t1 */ + bneid r4, d_wu2_loop /* while (n) loop */ + bsrli r11, r12, 16 /* h = v >> 16 (IN DELAY SLOT) */ + +d_word_done: + +d_xfer_end: +d_xfer_end_loop: + beqi r7, a_done /* while (c) */ + addi r6, r6, -1 /* s-- */ + lbui r9, r6, 0 /* t1 = *s */ + addi r5, r5, -1 /* d-- */ + sbi r9, r5, 0 /* *d = t1 */ + brid d_xfer_end_loop /* loop */ + addi r7, r7, -1 /* c-- (IN DELAY SLOT) */ + +d_done: + rtsd r15, 8 + nop + +.end memmove diff --git a/arch/microblaze/lib/memcpy.c b/arch/microblaze/lib/memcpy.c new file mode 100644 index 00000000000..5880119c448 --- /dev/null +++ b/arch/microblaze/lib/memcpy.c @@ -0,0 +1,161 @@ +/* + * Copyright (C) 2008-2009 Michal Simek + * Copyright (C) 2008-2009 PetaLogix + * Copyright (C) 2007 John Williams + * + * Reasonably optimised generic C-code for memcpy on Microblaze + * This is generic C code to do efficient, alignment-aware memcpy. + * + * It is based on demo code originally Copyright 2001 by Intel Corp, taken from + * http://www.embedded.com/showArticle.jhtml?articleID=19205567 + * + * Attempts were made, unsuccesfully, to contact the original + * author of this code (Michael Morrow, Intel). Below is the original + * copyright notice. + * + * This software has been developed by Intel Corporation. + * Intel specifically disclaims all warranties, express or + * implied, and all liability, including consequential and + * other indirect damages, for the use of this program, including + * liability for infringement of any proprietary rights, + * and including the warranties of merchantability and fitness + * for a particular purpose. Intel does not assume any + * responsibility for and errors which may appear in this program + * not any responsibility to update it. + */ + +#include +#include +#include +#include + +#include +#include + +#ifdef __HAVE_ARCH_MEMCPY +void *memcpy(void *v_dst, const void *v_src, __kernel_size_t c) +{ + const char *src = v_src; + char *dst = v_dst; +#ifndef CONFIG_OPT_LIB_FUNCTION + /* Simple, byte oriented memcpy. */ + while (c--) + *dst++ = *src++; + + return v_dst; +#else + /* The following code tries to optimize the copy by using unsigned + * alignment. This will work fine if both source and destination are + * aligned on the same boundary. However, if they are aligned on + * different boundaries shifts will be necessary. This might result in + * bad performance on MicroBlaze systems without a barrel shifter. + */ + const uint32_t *i_src; + uint32_t *i_dst; + + if (c >= 4) { + unsigned value, buf_hold; + + /* Align the dstination to a word boundry. */ + /* This is done in an endian independant manner. */ + switch ((unsigned long)dst & 3) { + case 1: + *dst++ = *src++; + --c; + case 2: + *dst++ = *src++; + --c; + case 3: + *dst++ = *src++; + --c; + } + + i_dst = (void *)dst; + + /* Choose a copy scheme based on the source */ + /* alignment relative to dstination. 
*/ + switch ((unsigned long)src & 3) { + case 0x0: /* Both byte offsets are aligned */ + i_src = (const void *)src; + + for (; c >= 4; c -= 4) + *i_dst++ = *i_src++; + + src = (const void *)i_src; + break; + case 0x1: /* Unaligned - Off by 1 */ + /* Word align the source */ + i_src = (const void *) ((unsigned)src & ~3); + + /* Load the holding buffer */ + buf_hold = *i_src++ << 8; + + for (; c >= 4; c -= 4) { + value = *i_src++; + *i_dst++ = buf_hold | value >> 24; + buf_hold = value << 8; + } + + /* Realign the source */ + src = (const void *)i_src; + src -= 3; + break; + case 0x2: /* Unaligned - Off by 2 */ + /* Word align the source */ + i_src = (const void *) ((unsigned)src & ~3); + + /* Load the holding buffer */ + buf_hold = *i_src++ << 16; + + for (; c >= 4; c -= 4) { + value = *i_src++; + *i_dst++ = buf_hold | value >> 16; + buf_hold = value << 16; + } + + /* Realign the source */ + src = (const void *)i_src; + src -= 2; + break; + case 0x3: /* Unaligned - Off by 3 */ + /* Word align the source */ + i_src = (const void *) ((unsigned)src & ~3); + + /* Load the holding buffer */ + buf_hold = *i_src++ << 24; + + for (; c >= 4; c -= 4) { + value = *i_src++; + *i_dst++ = buf_hold | value >> 8; + buf_hold = value << 24; + } + + /* Realign the source */ + src = (const void *)i_src; + src -= 1; + break; + } + dst = (void *)i_dst; + } + + /* Finish off any remaining bytes */ + /* simple fast copy, ... unless a cache boundry is crossed */ + switch (c) { + case 3: + *dst++ = *src++; + case 2: + *dst++ = *src++; + case 1: + *dst++ = *src++; + } + + return v_dst; +#endif +} +EXPORT_SYMBOL(memcpy); +#endif /* __HAVE_ARCH_MEMCPY */ + +void *cacheable_memcpy(void *d, const void *s, __kernel_size_t c) +{ + return memcpy(d, s, c); +} diff --git a/arch/microblaze/lib/memmove.c b/arch/microblaze/lib/memmove.c new file mode 100644 index 00000000000..d4e9f49a71f --- /dev/null +++ b/arch/microblaze/lib/memmove.c @@ -0,0 +1,175 @@ +/* + * Copyright (C) 2008-2009 Michal Simek + * Copyright (C) 2008-2009 PetaLogix + * Copyright (C) 2007 John Williams + * + * Reasonably optimised generic C-code for memcpy on Microblaze + * This is generic C code to do efficient, alignment-aware memmove. + * + * It is based on demo code originally Copyright 2001 by Intel Corp, taken from + * http://www.embedded.com/showArticle.jhtml?articleID=19205567 + * + * Attempts were made, unsuccesfully, to contact the original + * author of this code (Michael Morrow, Intel). Below is the original + * copyright notice. + * + * This software has been developed by Intel Corporation. + * Intel specifically disclaims all warranties, express or + * implied, and all liability, including consequential and + * other indirect damages, for the use of this program, including + * liability for infringement of any proprietary rights, + * and including the warranties of merchantability and fitness + * for a particular purpose. Intel does not assume any + * responsibility for and errors which may appear in this program + * not any responsibility to update it. 
+ */ + +#include +#include +#include +#include +#include + +#ifdef __HAVE_ARCH_MEMMOVE +void *memmove(void *v_dst, const void *v_src, __kernel_size_t c) +{ + const char *src = v_src; + char *dst = v_dst; + +#ifdef CONFIG_OPT_LIB_FUNCTION + const uint32_t *i_src; + uint32_t *i_dst; +#endif + + if (!c) + return v_dst; + + /* Use memcpy when source is higher than dest */ + if (v_dst <= v_src) + return memcpy(v_dst, v_src, c); + +#ifndef CONFIG_OPT_LIB_FUNCTION + /* copy backwards, from end to beginning */ + src += c; + dst += c; + + /* Simple, byte oriented memmove. */ + while (c--) + *--dst = *--src; + + return v_dst; +#else + /* The following code tries to optimize the copy by using unsigned + * alignment. This will work fine if both source and destination are + * aligned on the same boundary. However, if they are aligned on + * different boundaries shifts will be necessary. This might result in + * bad performance on MicroBlaze systems without a barrel shifter. + */ + /* FIXME this part needs more test */ + /* Do a descending copy - this is a bit trickier! */ + dst += c; + src += c; + + if (c >= 4) { + unsigned value, buf_hold; + + /* Align the destination to a word boundry. */ + /* This is done in an endian independant manner. */ + + switch ((unsigned long)dst & 3) { + case 3: + *--dst = *--src; + --c; + case 2: + *--dst = *--src; + --c; + case 1: + *--dst = *--src; + --c; + } + + i_dst = (void *)dst; + /* Choose a copy scheme based on the source */ + /* alignment relative to dstination. */ + switch ((unsigned long)src & 3) { + case 0x0: /* Both byte offsets are aligned */ + + i_src = (const void *)src; + + for (; c >= 4; c -= 4) + *--i_dst = *--i_src; + + src = (const void *)i_src; + break; + case 0x1: /* Unaligned - Off by 1 */ + /* Word align the source */ + i_src = (const void *) (((unsigned)src + 4) & ~3); + + /* Load the holding buffer */ + buf_hold = *--i_src >> 24; + + for (; c >= 4; c -= 4) { + value = *--i_src; + *--i_dst = buf_hold << 8 | value; + buf_hold = value >> 24; + } + + /* Realign the source */ + src = (const void *)i_src; + src += 1; + break; + case 0x2: /* Unaligned - Off by 2 */ + /* Word align the source */ + i_src = (const void *) (((unsigned)src + 4) & ~3); + + /* Load the holding buffer */ + buf_hold = *--i_src >> 16; + + for (; c >= 4; c -= 4) { + value = *--i_src; + *--i_dst = buf_hold << 16 | value; + buf_hold = value >> 16; + } + + /* Realign the source */ + src = (const void *)i_src; + src += 2; + break; + case 0x3: /* Unaligned - Off by 3 */ + /* Word align the source */ + i_src = (const void *) (((unsigned)src + 4) & ~3); + + /* Load the holding buffer */ + buf_hold = *--i_src >> 8; + + for (; c >= 4; c -= 4) { + value = *--i_src; + *--i_dst = buf_hold << 24 | value; + buf_hold = value >> 8; + } + + /* Realign the source */ + src = (const void *)i_src; + src += 3; + break; + } + dst = (void *)i_dst; + } + + /* simple fast copy, ... 
unless a cache boundry is crossed */ + /* Finish off any remaining bytes */ + switch (c) { + case 4: + *--dst = *--src; + case 3: + *--dst = *--src; + case 2: + *--dst = *--src; + case 1: + *--dst = *--src; + } + return v_dst; +#endif +} +EXPORT_SYMBOL(memmove); +#endif /* __HAVE_ARCH_MEMMOVE */ diff --git a/arch/microblaze/lib/memset.c b/arch/microblaze/lib/memset.c new file mode 100644 index 00000000000..941dc8f94b0 --- /dev/null +++ b/arch/microblaze/lib/memset.c @@ -0,0 +1,82 @@ +/* + * Copyright (C) 2008-2009 Michal Simek + * Copyright (C) 2008-2009 PetaLogix + * Copyright (C) 2007 John Williams + * + * Reasonably optimised generic C-code for memset on Microblaze + * This is generic C code to do efficient, alignment-aware memcpy. + * + * It is based on demo code originally Copyright 2001 by Intel Corp, taken from + * http://www.embedded.com/showArticle.jhtml?articleID=19205567 + * + * Attempts were made, unsuccesfully, to contact the original + * author of this code (Michael Morrow, Intel). Below is the original + * copyright notice. + * + * This software has been developed by Intel Corporation. + * Intel specifically disclaims all warranties, express or + * implied, and all liability, including consequential and + * other indirect damages, for the use of this program, including + * liability for infringement of any proprietary rights, + * and including the warranties of merchantability and fitness + * for a particular purpose. Intel does not assume any + * responsibility for and errors which may appear in this program + * not any responsibility to update it. + */ + +#include +#include +#include +#include +#include + +#ifdef __HAVE_ARCH_MEMSET +void *memset(void *v_src, int c, __kernel_size_t n) +{ + + char *src = v_src; +#ifdef CONFIG_OPT_LIB_FUNCTION + uint32_t *i_src; + uint32_t w32; +#endif + /* Truncate c to 8 bits */ + c = (c & 0xFF); + +#ifdef CONFIG_OPT_LIB_FUNCTION + /* Make a repeating word out of it */ + w32 = c; + w32 |= w32 << 8; + w32 |= w32 << 16; + + if (n >= 4) { + /* Align the destination to a word boundary */ + /* This is done in an endian independant manner */ + switch ((unsigned) src & 3) { + case 1: + *src++ = c; + --n; + case 2: + *src++ = c; + --n; + case 3: + *src++ = c; + --n; + } + + i_src = (void *)src; + + /* Do as many full-word copies as we can */ + for (; n >= 4; n -= 4) + *i_src++ = w32; + + src = (void *)i_src; + } +#endif + /* Simple, byte oriented memset or the rest of count. */ + while (n--) + *src++ = c; + + return v_src; +} +EXPORT_SYMBOL(memset); +#endif /* __HAVE_ARCH_MEMSET */ -- cgit v1.2.3 From f11e044b449c0534cd2de3465f68925f68190866 Mon Sep 17 00:00:00 2001 From: Michal Simek Date: Fri, 27 Mar 2009 14:25:22 +0100 Subject: microblaze_v8: checksum support Reviewed-by: Ingo Molnar Acked-by: Stephen Neuendorffer Acked-by: John Linn Acked-by: John Williams Signed-off-by: Michal Simek --- arch/microblaze/lib/checksum.c | 163 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 163 insertions(+) create mode 100644 arch/microblaze/lib/checksum.c (limited to 'arch/microblaze/lib') diff --git a/arch/microblaze/lib/checksum.c b/arch/microblaze/lib/checksum.c new file mode 100644 index 00000000000..809340070a1 --- /dev/null +++ b/arch/microblaze/lib/checksum.c @@ -0,0 +1,163 @@ +/* + * + * INET An implementation of the TCP/IP protocol suite for the LINUX + * operating system. INET is implemented using the BSD Socket + * interface as the means of communication with the user level. 
+ * + * IP/TCP/UDP checksumming routines + * + * Authors: Jorge Cwik, + * Arnt Gulbrandsen, + * Tom May, + * Andreas Schwab, + * Lots of code moved from tcp.c and ip.c; see those files + * for more names. + * + * 03/02/96 Jes Sorensen, Andreas Schwab, Roman Hodek: + * Fixed some nasty bugs, causing some horrible crashes. + * A: At some points, the sum (%0) was used as + * length-counter instead of the length counter + * (%1). Thanks to Roman Hodek for pointing this out. + * B: GCC seems to mess up if one uses too many + * data-registers to hold input values and one tries to + * specify d0 and d1 as scratch registers. Letting gcc + * choose these registers itself solves the problem. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +/* Revised by Kenneth Albanowski for m68knommu. Basic problem: unaligned access + kills, so most of the assembly has to go. */ + +#include +#include +#include + +static inline unsigned short from32to16(unsigned long x) +{ + /* add up 16-bit and 16-bit for 16+c bit */ + x = (x & 0xffff) + (x >> 16); + /* add up carry.. */ + x = (x & 0xffff) + (x >> 16); + return x; +} + +static unsigned int do_csum(const unsigned char *buff, int len) +{ + int odd, count; + unsigned long result = 0; + + if (len <= 0) + goto out; + odd = 1 & (unsigned long) buff; + if (odd) { + result = *buff; + len--; + buff++; + } + count = len >> 1; /* nr of 16-bit words.. */ + if (count) { + if (2 & (unsigned long) buff) { + result += *(unsigned short *) buff; + count--; + len -= 2; + buff += 2; + } + count >>= 1; /* nr of 32-bit words.. */ + if (count) { + unsigned long carry = 0; + do { + unsigned long w = *(unsigned long *) buff; + count--; + buff += 4; + result += carry; + result += w; + carry = (w > result); + } while (count); + result += carry; + result = (result & 0xffff) + (result >> 16); + } + if (len & 2) { + result += *(unsigned short *) buff; + buff += 2; + } + } + if (len & 1) + result += (*buff << 8); + result = from32to16(result); + if (odd) + result = ((result >> 8) & 0xff) | ((result & 0xff) << 8); +out: + return result; +} + +/* + * This is a version of ip_compute_csum() optimized for IP headers, + * which always checksum on 4 octet boundaries. + */ +__sum16 ip_fast_csum(const void *iph, unsigned int ihl) +{ + return (__force __sum16)~do_csum(iph, ihl*4); +} + +/* + * computes the checksum of a memory block at buff, length len, + * and adds in "sum" (32-bit) + * + * returns a 32-bit number suitable for feeding into itself + * or csum_tcpudp_magic + * + * this function must be called with even lengths, except + * for the last fragment, which may be odd + * + * it's best to have buff aligned on a 32-bit boundary + */ +__wsum csum_partial(const void *buff, int len, __wsum sum) +{ + unsigned int result = do_csum(buff, len); + + /* add in old sum, and carry.. 
*/ + result += sum; + if (sum > result) + result += 1; + return result; +} +EXPORT_SYMBOL(csum_partial); + +/* + * this routine is used for miscellaneous IP-like checksums, mainly + * in icmp.c + */ +__sum16 ip_compute_csum(const unsigned char *buff, int len) +{ + return ~do_csum(buff, len); +} +EXPORT_SYMBOL(ip_compute_csum); + +/* + * copy from fs while checksumming, otherwise like csum_partial + */ +__wsum +csum_partial_copy_from_user(const char __user *src, char *dst, int len, + int sum, int *csum_err) +{ + if (csum_err) + *csum_err = 0; + memcpy(dst, src, len); + return csum_partial(dst, len, sum); +} +EXPORT_SYMBOL(csum_partial_copy_from_user); + +/* + * copy from ds while checksumming, otherwise like csum_partial + */ +__wsum +csum_partial_copy(const char *src, char *dst, int len, int sum) +{ + memcpy(dst, src, len); + return csum_partial(dst, len, sum); +} +EXPORT_SYMBOL(csum_partial_copy); -- cgit v1.2.3 From 2660663ff2d34a3665381a2591bbc3ce0cdbd69c Mon Sep 17 00:00:00 2001 From: Michal Simek Date: Fri, 27 Mar 2009 14:25:23 +0100 Subject: microblaze_v8: uaccess files Reviewed-by: Ingo Molnar Acked-by: Stephen Neuendorffer Acked-by: John Linn Acked-by: John Williams Signed-off-by: Michal Simek --- arch/microblaze/lib/uaccess.c | 41 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) create mode 100644 arch/microblaze/lib/uaccess.c (limited to 'arch/microblaze/lib') diff --git a/arch/microblaze/lib/uaccess.c b/arch/microblaze/lib/uaccess.c new file mode 100644 index 00000000000..8eb9df5a26c --- /dev/null +++ b/arch/microblaze/lib/uaccess.c @@ -0,0 +1,41 @@ +/* + * Copyright (C) 2006 Atmark Techno, Inc. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ + +#include +#include + +#include + +long strnlen_user(const char __user *src, long count) +{ + return strlen(src) + 1; +} + +#define __do_strncpy_from_user(dst, src, count, res) \ + do { \ + char *tmp; \ + strncpy(dst, src, count); \ + for (tmp = dst; *tmp && count > 0; tmp++, count--) \ + ; \ + res = (tmp - dst); \ + } while (0) + +long __strncpy_from_user(char *dst, const char __user *src, long count) +{ + long res; + __do_strncpy_from_user(dst, src, count, res); + return res; +} + +long strncpy_from_user(char *dst, const char __user *src, long count) +{ + long res = -EFAULT; + if (access_ok(VERIFY_READ, src, 1)) + __do_strncpy_from_user(dst, src, count, res); + return res; +} -- cgit v1.2.3 From 5f8ffb5f6649a261372547a5841285c23409ab68 Mon Sep 17 00:00:00 2001 From: Michal Simek Date: Fri, 27 Mar 2009 14:25:51 +0100 Subject: microblaze_v8: Makefiles for Microblaze cpu Reviewed-by: Ingo Molnar Acked-by: Randy Dunlap Acked-by: John Linn Acked-by: Stephen Neuendorffer Acked-by: John Williams Signed-off-by: Michal Simek --- arch/microblaze/lib/Makefile | 13 +++++++++++++ 1 file changed, 13 insertions(+) create mode 100644 arch/microblaze/lib/Makefile (limited to 'arch/microblaze/lib') diff --git a/arch/microblaze/lib/Makefile b/arch/microblaze/lib/Makefile new file mode 100644 index 00000000000..d27126bf306 --- /dev/null +++ b/arch/microblaze/lib/Makefile @@ -0,0 +1,13 @@ +# +# Makefile +# + +lib-y := memset.o checksum.o + +ifeq ($(CONFIG_OPT_LIB_ASM),y) +lib-y += fastcopy.o +else +lib-y += memcpy.o memmove.o +endif + +lib-y += uaccess.o -- cgit v1.2.3
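
The copy routines above (fastcopy.S and the CONFIG_OPT_LIB_FUNCTION paths in memcpy.c/memmove.c) all rely on the same holding-buffer scheme: align the destination, read whole aligned words from the source, and splice each word with its neighbour using shifts whenever the source sits at a different byte offset within its word. The following is a minimal standalone sketch of that scheme, not the kernel code itself; unaligned_word_copy() and is_big_endian() are illustrative names, and the shift direction is selected at run time because the kernel version is written only for big-endian MicroBlaze.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int is_big_endian(void)
{
	const uint32_t probe = 1;

	return *(const unsigned char *)&probe == 0;
}

/*
 * Copy n bytes from src to dst by reading aligned 32-bit words from the
 * source and stitching neighbouring words together with shifts, the way
 * the unaligned cases in the patch do.  Destination alignment does not
 * matter here because the stores go through memcpy(); the kernel code
 * aligns the destination first and stores whole words directly.
 */
static void unaligned_word_copy(void *dst, const void *src, size_t n)
{
	unsigned char *d = dst;
	const unsigned char *s = src;
	unsigned int off = (uintptr_t)s & 3;
	int be = is_big_endian();

	if (off && n >= 4) {
		const uint32_t *ws = (const uint32_t *)(s - off); /* aligned */
		unsigned int keep = 8 * off;	/* bits already consumed */
		uint32_t hold = *ws++;		/* holding buffer */

		for (; n >= 4; n -= 4, d += 4, s += 4) {
			uint32_t next = *ws++;
			uint32_t out = be
				? (hold << keep) | (next >> (32 - keep))
				: (hold >> keep) | (next << (32 - keep));

			memcpy(d, &out, 4);
			hold = next;
		}
	} else {
		for (; n >= 4; n -= 4, d += 4, s += 4)
			memcpy(d, s, 4);	/* same alignment: word copy */
	}
	memcpy(d, s, n);			/* 0..3 trailing bytes */
}

int main(void)
{
	static uint32_t src_words[17];		/* word-aligned backing store */
	unsigned char *src = (unsigned char *)src_words;
	unsigned char dst[48];
	unsigned int i;

	for (i = 0; i < sizeof(src_words); i++)
		src[i] = (unsigned char)i;

	/* Like the kernel routine, the word loads may read up to three
	 * bytes past the requested source range, hence the oversized
	 * backing store. */
	unaligned_word_copy(dst, src + 1, 41);	/* source off by one byte */
	printf("%s\n", memcmp(dst, src + 1, 41) ? "mismatch" : "ok");
	return 0;
}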
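
memset.c uses a related trick: replicate the fill byte into a 32-bit pattern (w32 |= w32 << 8; w32 |= w32 << 16) so the aligned middle of the buffer can be written a word at a time. A small sketch of that replication follows, assuming nothing beyond standard C; word_fill() is an illustrative name, not part of the patch, and the result is checked against the host memset().

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Fill n bytes at dst with byte c, using word stores for the bulk. */
static void word_fill(void *dst, unsigned char c, size_t n)
{
	unsigned char *p = dst;
	uint32_t w32 = c;

	w32 |= w32 << 8;			/* 000000cc -> 0000cccc */
	w32 |= w32 << 16;			/* 0000cccc -> cccccccc */

	while (((uintptr_t)p & 3) && n) {	/* align the destination */
		*p++ = c;
		n--;
	}
	for (; n >= 4; n -= 4, p += 4)		/* full-word stores */
		memcpy(p, &w32, 4);
	while (n--)				/* trailing bytes */
		*p++ = c;
}

int main(void)
{
	unsigned char a[37], b[37];

	word_fill(a, 0xAB, sizeof(a));
	memset(b, 0xAB, sizeof(b));
	printf("%s\n", memcmp(a, b, sizeof(a)) ? "mismatch" : "ok");
	return 0;
}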
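
checksum.c accumulates the data into a 32-bit sum with an end-around carry and folds it to 16 bits in from32to16(), which is the classic one's-complement Internet checksum. The sketch below computes the same checksum portably over big-endian 16-bit word pairs and checks it against a sample IPv4 header; inet_csum() and the header bytes are illustrative and not taken from the patch. The kernel routine reaches the same 16-bit result more quickly by summing whole 32-bit words and handling odd starting addresses with a byte swap.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

static uint16_t inet_csum(const unsigned char *buf, size_t len)
{
	uint32_t sum = 0;

	while (len > 1) {		/* sum 16-bit words in network order */
		sum += (uint32_t)buf[0] << 8 | buf[1];
		buf += 2;
		len -= 2;
	}
	if (len)			/* odd trailing byte, zero padded */
		sum += (uint32_t)buf[0] << 8;

	while (sum >> 16)		/* fold carries back in */
		sum = (sum & 0xffff) + (sum >> 16);

	return (uint16_t)~sum;		/* one's complement of the sum */
}

int main(void)
{
	/* Example IPv4 header with a valid checksum field (0xb1e6);
	 * recomputing the checksum over a valid header yields zero. */
	unsigned char iphdr[20] = {
		0x45, 0x00, 0x00, 0x3c, 0x1c, 0x46, 0x40, 0x00,
		0x40, 0x06, 0xb1, 0xe6, 0xac, 0x10, 0x0a, 0x63,
		0xac, 0x10, 0x0a, 0x0c
	};

	printf("checksum over a valid header: %#x (0 means OK)\n",
	       inet_csum(iphdr, sizeof(iphdr)));
	return 0;
}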