GitHub Repository: torvalds/linux
Path: blob/master/arch/arm/lib/uaccess_with_memcpy.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/lib/uaccess_with_memcpy.c
 *
 *  Written by: Lennert Buytenhek and Nicolas Pitre
 *  Copyright (C) 2009 Marvell Semiconductor
 */

#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/uaccess.h>
#include <linux/rwsem.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/hardirq.h> /* for in_atomic() */
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <asm/current.h>
#include <asm/page.h>

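/*
 * Attempt to pin down the page containing '_addr' for a direct kernel
 * write.  On success (return 1) the relevant page-table lock is held:
 * either the PTE lock with *ptep set, or, for a huge (THP/HugeTLB)
 * mapping, mm->page_table_lock with *ptep set to NULL so the caller
 * knows which unlock path to use.  Returns 0 if the page is not
 * present, not young, not writable or not dirty, in which case the
 * caller must fault the page in and try again.
 */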
static int
pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
{
        unsigned long addr = (unsigned long)_addr;
        pgd_t *pgd;
        p4d_t *p4d;
        pmd_t *pmd;
        pte_t *pte;
        pud_t *pud;
        spinlock_t *ptl;

        pgd = pgd_offset(current->mm, addr);
        if (unlikely(pgd_none(*pgd) || pgd_bad(*pgd)))
                return 0;

        p4d = p4d_offset(pgd, addr);
        if (unlikely(p4d_none(*p4d) || p4d_bad(*p4d)))
                return 0;

        pud = pud_offset(p4d, addr);
        if (unlikely(pud_none(*pud) || pud_bad(*pud)))
                return 0;

        pmd = pmd_offset(pud, addr);
        if (unlikely(pmd_none(*pmd)))
                return 0;

        /*
         * A pmd can be bad if it refers to a HugeTLB or THP page.
         *
         * Both THP and HugeTLB pages have the same pmd layout
         * and should not be manipulated by the pte functions.
         *
         * Lock the page table for the destination and check
         * to see that it's still huge and whether or not we will
         * need to fault on write.
         */
        if (unlikely(pmd_leaf(*pmd))) {
                ptl = &current->mm->page_table_lock;
                spin_lock(ptl);
                if (unlikely(!pmd_leaf(*pmd)
                        || pmd_hugewillfault(*pmd))) {
                        spin_unlock(ptl);
                        return 0;
                }

                *ptep = NULL;
                *ptlp = ptl;
                return 1;
        }

        if (unlikely(pmd_bad(*pmd)))
                return 0;

        pte = pte_offset_map_lock(current->mm, pmd, addr, &ptl);
        if (unlikely(!pte))
                return 0;

        if (unlikely(!pte_present(*pte) || !pte_young(*pte) ||
            !pte_write(*pte) || !pte_dirty(*pte))) {
                pte_unmap_unlock(pte, ptl);
                return 0;
        }

        *ptep = pte;
        *ptlp = ptl;

        return 1;
}

static unsigned long noinline
__copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
{
        unsigned long ua_flags;
        int atomic;

        /* the mmap semaphore is taken only if not in an atomic context */
        atomic = faulthandler_disabled();

        if (!atomic)
                mmap_read_lock(current->mm);
        while (n) {
                pte_t *pte;
                spinlock_t *ptl;
                int tocopy;

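                /*
                 * Writing one byte through the normal uaccess path forces
                 * the destination page to be faulted in, made writable and
                 * marked young/dirty, so the next pin_page_for_write()
                 * attempt can succeed.  If even that single __put_user()
                 * faults, the address is simply bad and we give up,
                 * returning the number of bytes left uncopied.
                 */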
                while (!pin_page_for_write(to, &pte, &ptl)) {
                        if (!atomic)
                                mmap_read_unlock(current->mm);
                        if (__put_user(0, (char __user *)to))
                                goto out;
                        if (!atomic)
                                mmap_read_lock(current->mm);
                }

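                /*
                 * (~to & ~PAGE_MASK) + 1 is the number of bytes from 'to'
                 * up to the end of its page, so a single __memcpy() never
                 * crosses the boundary of the page we just pinned.
                 */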
                tocopy = (~(unsigned long)to & ~PAGE_MASK) + 1;
                if (tocopy > n)
                        tocopy = n;

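                /*
                 * Open up kernel access to user space just for the raw
                 * __memcpy() (on ARM this toggles the uaccess state, e.g.
                 * the software PAN / domain setting), then restore it.
                 */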
                ua_flags = uaccess_save_and_enable();
                __memcpy((void *)to, from, tocopy);
                uaccess_restore(ua_flags);
                to += tocopy;
                from += tocopy;
                n -= tocopy;

                if (pte)
                        pte_unmap_unlock(pte, ptl);
                else
                        spin_unlock(ptl);
        }
        if (!atomic)
                mmap_read_unlock(current->mm);

out:
        return n;
}

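/*
 * C front end used for copy_to_user() when CONFIG_UACCESS_WITH_MEMCPY
 * is selected; like copy_to_user(), it returns the number of bytes
 * that could not be copied (0 on complete success).
 */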
unsigned long
arm_copy_to_user(void __user *to, const void *from, unsigned long n)
{
        /*
         * This test is stubbed out of the main function above to keep
         * the overhead for small copies low by avoiding a large
         * register dump on the stack just to reload them right away.
         * With frame pointer disabled, tail call optimization kicks in
         * as well, making this test almost invisible.
         */
        if (n < 64) {
                unsigned long ua_flags = uaccess_save_and_enable();
                n = __copy_to_user_std(to, from, n);
                uaccess_restore(ua_flags);
        } else {
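                /*
                 * uaccess_mask_range_ptr() sanitises the (to, n) range
                 * against the user address limit before the memcpy path
                 * dereferences it, so an out-of-range pointer cannot be
                 * turned into a kernel access.
                 */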
                n = __copy_to_user_memcpy(uaccess_mask_range_ptr(to, n),
                                          from, n);
        }
        return n;
}

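/*
 * Same page-pinning loop as __copy_to_user_memcpy(), but the user
 * buffer is zero-filled with __memset() and the mmap read lock is
 * taken unconditionally.
 */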
static unsigned long noinline
__clear_user_memset(void __user *addr, unsigned long n)
{
        unsigned long ua_flags;

        mmap_read_lock(current->mm);
        while (n) {
                pte_t *pte;
                spinlock_t *ptl;
                int tocopy;

                while (!pin_page_for_write(addr, &pte, &ptl)) {
                        mmap_read_unlock(current->mm);
                        if (__put_user(0, (char __user *)addr))
                                goto out;
                        mmap_read_lock(current->mm);
                }

                tocopy = (~(unsigned long)addr & ~PAGE_MASK) + 1;
                if (tocopy > n)
                        tocopy = n;

                ua_flags = uaccess_save_and_enable();
                __memset((void *)addr, 0, tocopy);
                uaccess_restore(ua_flags);
                addr += tocopy;
                n -= tocopy;

                if (pte)
                        pte_unmap_unlock(pte, ptl);
                else
                        spin_unlock(ptl);
        }
        mmap_read_unlock(current->mm);

out:
        return n;
}

unsigned long arm_clear_user(void __user *addr, unsigned long n)
{
        /* See the rationale for this in arm_copy_to_user() above. */
        if (n < 64) {
                unsigned long ua_flags = uaccess_save_and_enable();
                n = __clear_user_std(addr, n);
                uaccess_restore(ua_flags);
        } else {
                n = __clear_user_memset(addr, n);
        }
        return n;
}

#if 0

/*
 * This code is disabled by default, but kept around in case the chosen
 * thresholds need to be revalidated.  A threshold determined at runtime
 * would imply some (small but real) overhead, and so far measurements
 * on the targets of interest didn't show a worthwhile variation.
 *
 * Note that a fairly precise sched_clock() implementation is needed
 * for the results to make sense.
 */
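/*
 * To redo the measurement, change the "#if 0" above to "#if 1": the
 * test then runs as a subsys initcall and each printk line reports the
 * copy size followed by the two sched_clock() deltas, first for the
 * memcpy/memset based variant and then for the standard routine.
 */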

#include <linux/vmalloc.h>

static int __init test_size_treshold(void)
{
        struct page *src_page, *dst_page;
        void *user_ptr, *kernel_ptr;
        unsigned long long t0, t1, t2;
        int size, ret;

        ret = -ENOMEM;
        src_page = alloc_page(GFP_KERNEL);
        if (!src_page)
                goto no_src;
        dst_page = alloc_page(GFP_KERNEL);
        if (!dst_page)
                goto no_dst;
        kernel_ptr = page_address(src_page);
        user_ptr = vmap(&dst_page, 1, VM_IOREMAP, __pgprot(__PAGE_COPY));
        if (!user_ptr)
                goto no_vmap;

        /* warm up the src page dcache */
        ret = __copy_to_user_memcpy(user_ptr, kernel_ptr, PAGE_SIZE);

        for (size = PAGE_SIZE; size >= 4; size /= 2) {
                t0 = sched_clock();
                ret |= __copy_to_user_memcpy(user_ptr, kernel_ptr, size);
                t1 = sched_clock();
                ret |= __copy_to_user_std(user_ptr, kernel_ptr, size);
                t2 = sched_clock();
                printk("copy_to_user: %d %llu %llu\n", size, t1 - t0, t2 - t1);
        }

        for (size = PAGE_SIZE; size >= 4; size /= 2) {
                t0 = sched_clock();
                ret |= __clear_user_memset(user_ptr, size);
                t1 = sched_clock();
                ret |= __clear_user_std(user_ptr, size);
                t2 = sched_clock();
                printk("clear_user: %d %llu %llu\n", size, t1 - t0, t2 - t1);
        }

        if (ret)
                ret = -EFAULT;

        vunmap(user_ptr);
no_vmap:
        put_page(dst_page);
no_dst:
        put_page(src_page);
no_src:
        return ret;
}

subsys_initcall(test_size_treshold);

#endif