path: root/arch/x86_64/lib/copy_page.S
blob: dd3aa47b6bf51d09fcb24dc8b8affe4a7e3e7ed5
/* Written 2003 by Andi Kleen, based on a kernel by Evandro Menezes */
	
/* Don't use a streaming (non-temporal) store: a normal store is
   better when the target page ends up in the cache. */

/* Could vary the prefetch distance based on SMP/UP. */
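
/* copy_page(%rdi = dest, %rsi = src): copy one 4096-byte page.
   The unrolled loop moves a whole 64-byte cache line per iteration
   through eight 64-bit registers; %rbx and %r12 are callee-saved,
   hence the save/restore around the loops. */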

	.globl copy_page
	.p2align 4
copy_page:
	subq	$3*8,%rsp
	movq	%rbx,(%rsp)
	movq	%r12,1*8(%rsp)
	movq	%r13,2*8(%rsp)
			
	/* Do all but the last 5 cache lines here; the tail is copied
	   separately so the prefetch 5 lines ahead below stays inside
	   the source page. */
	movl	$(4096/64)-5,%ecx
	.p2align 4
.Loop64:
	dec	%rcx

	movq        (%rsi), %rax
	movq      8 (%rsi), %rbx
	movq     16 (%rsi), %rdx
	movq     24 (%rsi), %r8
	movq     32 (%rsi), %r9
	movq     40 (%rsi), %r10
	movq     48 (%rsi), %r11
	movq     56 (%rsi), %r12

	/* Prefetch the source line 5 iterations ahead. */
	prefetcht0 5*64(%rsi)

	movq     %rax,    (%rdi)
	movq     %rbx,  8 (%rdi)
	movq     %rdx, 16 (%rdi)
	movq     %r8,  24 (%rdi)
	movq     %r9,  32 (%rdi)
	movq     %r10, 40 (%rdi)
	movq     %r11, 48 (%rdi)
	movq     %r12, 56 (%rdi)

	leaq    64 (%rsi), %rsi
	leaq    64 (%rdi), %rdi

	jnz     .Loop64

	/* Copy the remaining 5 cache lines; no prefetch needed. */
	movl	$5,%ecx
	.p2align 4
.Loop2:
	decl   %ecx

	movq        (%rsi), %rax
	movq      8 (%rsi), %rbx
	movq     16 (%rsi), %rdx
	movq     24 (%rsi), %r8
	movq     32 (%rsi), %r9
	movq     40 (%rsi), %r10
	movq     48 (%rsi), %r11
	movq     56 (%rsi), %r12

	movq     %rax,    (%rdi)
	movq     %rbx,  8 (%rdi)
	movq     %rdx, 16 (%rdi)
	movq     %r8,  24 (%rdi)
	movq     %r9,  32 (%rdi)
	movq     %r10, 40 (%rdi)
	movq     %r11, 48 (%rdi)
	movq     %r12, 56 (%rdi)
	
	leaq	64(%rdi),%rdi
	leaq	64(%rsi),%rsi

	jnz	.Loop2

	movq	(%rsp),%rbx
	movq	1*8(%rsp),%r12
	movq	2*8(%rsp),%r13
	addq	$3*8,%rsp
	ret
	
	/* On C-stepping K8s the string copy instructions run faster.
	   The string version is also a lot simpler.  Use it when possible. */

#include <asm/cpufeature.h>
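
/* Each .altinstructions entry records the address of the original
   code, the address of the replacement, the CPUID feature bit that
   selects it, and the lengths of the original and replacement
   sequences.  At boot, the alternatives patching code overwrites
   the start of copy_page with copy_page_c on CPUs that advertise
   X86_FEATURE_K8_C. */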
		
	.section .altinstructions,"a"
	.align 8
	.quad  copy_page
	.quad  copy_page_c
	.byte  X86_FEATURE_K8_C
	.byte  copy_page_c_end-copy_page_c
	.byte  copy_page_c_end-copy_page_c
	.previous

	.section .altinstr_replacement,"ax"
copy_page_c:
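	/* %ecx = 4096/8 = 512 quadwords; rep movsq moves %rcx
	   quadwords from (%rsi) to (%rdi). */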
	movl $4096/8,%ecx
	rep movsq
	ret
copy_page_c_end:
	.previous