GitHub Repository: awilliam/linux-vfio
Path: blob/master/arch/x86/lib/copy_user_nocache_64.S
/*
 * Copyright 2008 Vitaly Mayatskikh <[email protected]>
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Subject to the GNU Public License v2.
 *
 * Functions to copy from and to user space.
 */

#include <linux/linkage.h>
#include <asm/dwarf2.h>

#define FIX_ALIGNMENT 1

#include <asm/current.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>

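/*
 * ALIGN_DESTINATION copies single bytes until the destination pointer
 * is 8-byte aligned, so the loops below can use aligned 8-byte stores.
 * A sketch in rough C, using the register roles visible in the code
 * (rdi = destination, rsi = source, edx = byte count):
 *
 *	while ((unsigned long)dst & 7) {
 *		*dst++ = *src++;
 *		count--;
 *	}
 *
 * If one of the byte moves faults, the .fixup code at 103: adds the
 * alignment bytes still owed back into the count and jumps to
 * copy_user_handle_tail.
 */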
        .macro ALIGN_DESTINATION
#ifdef FIX_ALIGNMENT
        /* check for bad alignment of destination */
        movl %edi,%ecx
        andl $7,%ecx
        jz 102f                         /* already aligned */
        subl $8,%ecx
        negl %ecx
        subl %ecx,%edx
100:    movb (%rsi),%al
101:    movb %al,(%rdi)
        incq %rsi
        incq %rdi
        decl %ecx
        jnz 100b
102:
        .section .fixup,"ax"
103:    addl %ecx,%edx                  /* ecx is zerorest also */
        jmp copy_user_handle_tail
        .previous

        .section __ex_table,"a"
        .align 8
        .quad 100b,103b
        .quad 101b,103b
        .previous
#endif
        .endm

/*
 * copy_user_nocache - Uncached memory copy with exception handling
 * This will force destination/source out of cache for more performance.
 */
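/*
 * Entry follows the x86-64 C calling convention: rdi = destination,
 * rsi = source, edx = byte count.  A sketch of the expected C-level
 * prototype (the authoritative declaration lives in this tree's
 * uaccess headers):
 *
 *	long __copy_user_nocache(void *dst, const void __user *src,
 *				 unsigned size, int zerorest);
 *
 * Returns 0 on success, otherwise the number of bytes left uncopied,
 * as computed by copy_user_handle_tail after a fault.  Because movnti
 * stores are non-temporal, an sfence is issued on every exit path to
 * make them globally visible.
 */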
ENTRY(__copy_user_nocache)
        CFI_STARTPROC
        cmpl $8,%edx
        jb 20f                          /* less than 8 bytes, go to byte copy loop */
        ALIGN_DESTINATION
        movl %edx,%ecx
        andl $63,%edx                   /* edx = byte count modulo 64 */
        shrl $6,%ecx                    /* ecx = number of 64-byte blocks */
        jz 17f
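/*
 * Main loop: each iteration moves one 64-byte block as eight quadword
 * loads followed by eight non-temporal (movnti) stores that bypass the
 * cache.  Every numbered instruction (1-16) is a potential fault site
 * with a matching __ex_table entry pointing at fixup 30: below.
 */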
1:      movq (%rsi),%r8
2:      movq 1*8(%rsi),%r9
3:      movq 2*8(%rsi),%r10
4:      movq 3*8(%rsi),%r11
5:      movnti %r8,(%rdi)
6:      movnti %r9,1*8(%rdi)
7:      movnti %r10,2*8(%rdi)
8:      movnti %r11,3*8(%rdi)
9:      movq 4*8(%rsi),%r8
10:     movq 5*8(%rsi),%r9
11:     movq 6*8(%rsi),%r10
12:     movq 7*8(%rsi),%r11
13:     movnti %r8,4*8(%rdi)
14:     movnti %r9,5*8(%rdi)
15:     movnti %r10,6*8(%rdi)
16:     movnti %r11,7*8(%rdi)
        leaq 64(%rsi),%rsi
        leaq 64(%rdi),%rdi
        decl %ecx
        jnz 1b
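/*
 * Copy the remaining 8..63 bytes as whole quadwords; fault sites 18
 * and 19 are handled by fixup 40: below.
 */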
17:     movl %edx,%ecx
        andl $7,%edx                    /* edx = trailing byte count (< 8) */
        shrl $3,%ecx                    /* ecx = number of quadwords */
        jz 20f
18:     movq (%rsi),%r8
19:     movnti %r8,(%rdi)
        leaq 8(%rsi),%rsi
        leaq 8(%rdi),%rdi
        decl %ecx
        jnz 18b
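/*
 * Copy the final 1..7 bytes one at a time; fault sites 21 and 22 are
 * handled by fixup 50: below.
 */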
20:     andl %edx,%edx
        jz 23f
        movl %edx,%ecx
21:     movb (%rsi),%al
22:     movb %al,(%rdi)
        incq %rsi
        incq %rdi
        decl %ecx
        jnz 21b
23:     xorl %eax,%eax                  /* success: return 0 */
        sfence                          /* make movnti stores globally visible */
        ret

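/*
 * Fault recovery: each fixup entry recomputes how many bytes were
 * still outstanding when the fault hit, leaves that count in edx, and
 * tails out through copy_user_handle_tail, which returns the number
 * of uncopied bytes (and may zero the destination tail, depending on
 * zerorest):
 *
 *	30: fault in the 64-byte loop; remaining bytes = ecx*64 + edx
 *	40: fault in the quadword loop; remaining bytes = edx + ecx*8
 *	50: fault in the byte loop; remaining bytes = ecx
 *
 * 60: issues sfence first so that movnti stores which did complete
 * are globally visible before the tail handler runs.
 */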
        .section .fixup,"ax"
30:     shll $6,%ecx
        addl %ecx,%edx
        jmp 60f
40:     lea (%rdx,%rcx,8),%rdx
        jmp 60f
50:     movl %ecx,%edx
60:     sfence
        jmp copy_user_handle_tail
        .previous

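/*
 * Exception table: pairs of (faulting instruction, fixup handler)
 * addresses.  On a page fault in this code, the kernel's fault path
 * searches this table and resumes execution at the registered fixup
 * entry instead of treating the fault as fatal.
 */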
        .section __ex_table,"a"
        .quad 1b,30b
        .quad 2b,30b
        .quad 3b,30b
        .quad 4b,30b
        .quad 5b,30b
        .quad 6b,30b
        .quad 7b,30b
        .quad 8b,30b
        .quad 9b,30b
        .quad 10b,30b
        .quad 11b,30b
        .quad 12b,30b
        .quad 13b,30b
        .quad 14b,30b
        .quad 15b,30b
        .quad 16b,30b
        .quad 18b,40b
        .quad 19b,40b
        .quad 21b,50b
        .quad 22b,50b
        .previous
        CFI_ENDPROC
ENDPROC(__copy_user_nocache)