GitHub Repository: awilliam/linux-vfio
Path: blob/master/arch/um/sys-i386/tls.c
/*
 * Copyright (C) 2005 Paolo 'Blaisorblade' Giarrusso <[email protected]>
 * Licensed under the GPL
 */

#include "linux/percpu.h"
#include "linux/sched.h"
#include "asm/uaccess.h"
#include "os.h"
#include "skas.h"
#include "sysdep/tls.h"

/*
 * If needed we can detect when it's uninitialized.
 *
 * These are initialized in an initcall and unchanged thereafter.
 */
static int host_supports_tls = -1;
int host_gdt_entry_tls_min;

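/*
 * Forward a TLS descriptor update to the host (PTRACE_SET_THREAD_AREA on the
 * host process backing userspace on the current CPU).
 */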
int do_set_thread_area(struct user_desc *info)
{
	int ret;
	u32 cpu;

	cpu = get_cpu();
	ret = os_set_thread_area(info, userspace_pid[cpu]);
	put_cpu();

	if (ret)
		printk(KERN_ERR "PTRACE_SET_THREAD_AREA failed, err = %d, "
		       "index = %d\n", ret, info->entry_number);

	return ret;
}

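/*
 * Read a TLS descriptor back from the host (PTRACE_GET_THREAD_AREA on the
 * host process backing userspace on the current CPU).
 */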
int do_get_thread_area(struct user_desc *info)
{
	int ret;
	u32 cpu;

	cpu = get_cpu();
	ret = os_get_thread_area(info, userspace_pid[cpu]);
	put_cpu();

	if (ret)
		printk(KERN_ERR "PTRACE_GET_THREAD_AREA failed, err = %d, "
		       "index = %d\n", ret, info->entry_number);

	return ret;
}

/*
 * sys_get_thread_area: get an as-yet-unused TLS descriptor index.
 * XXX: Consider leaving one free slot for glibc usage in the first place. This
 * must be done here (and by changing GDT_ENTRY_TLS_* macros) and nowhere else.
 *
 * Also, this must be tested when compiling in SKAS mode with dynamic linking
 * and running against NPTL.
 */
static int get_free_idx(struct task_struct* task)
{
	struct thread_struct *t = &task->thread;
	int idx;

	if (!t->arch.tls_array)
		return GDT_ENTRY_TLS_MIN;

	for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
		if (!t->arch.tls_array[idx].present)
			return idx + GDT_ENTRY_TLS_MIN;
	return -ESRCH;
}

static inline void clear_user_desc(struct user_desc* info)
{
	/* Postcondition: LDT_empty(info) returns true. */
	memset(info, 0, sizeof(*info));

	/*
	 * Check LDT_empty or the i386 sys_get_thread_area code - we do indeed
	 * obtain an empty user_desc.
	 */
	info->read_exec_only = 1;
	info->seg_not_present = 1;
}

#define O_FORCE 1

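/*
 * Sync this task's TLS descriptors into the host process.  Entries already
 * marked as flushed are skipped unless O_FORCE is set; non-present entries
 * that were never flushed are pushed out as empty descriptors.
 */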
static int load_TLS(int flags, struct task_struct *to)
{
	int ret = 0;
	int idx;

	for (idx = GDT_ENTRY_TLS_MIN; idx < GDT_ENTRY_TLS_MAX; idx++) {
		struct uml_tls_struct* curr =
			&to->thread.arch.tls_array[idx - GDT_ENTRY_TLS_MIN];

		/*
		 * If this entry was never flushed, clear it here and flush it
		 * to the host, which will clear it there as well.
		 */
		if (!curr->present) {
			if (!curr->flushed) {
				clear_user_desc(&curr->tls);
				curr->tls.entry_number = idx;
			} else {
				WARN_ON(!LDT_empty(&curr->tls));
				continue;
			}
		}

		if (!(flags & O_FORCE) && curr->flushed)
			continue;

		ret = do_set_thread_area(&curr->tls);
		if (ret)
			goto out;

		curr->flushed = 1;
	}
out:
	return ret;
}

/*
 * Check whether we need to do a flush for the new process, i.e. whether there
 * are any descriptors that are present but have not yet been flushed.
 */
static inline int needs_TLS_update(struct task_struct *task)
{
	int i;
	int ret = 0;

	for (i = GDT_ENTRY_TLS_MIN; i < GDT_ENTRY_TLS_MAX; i++) {
		struct uml_tls_struct* curr =
			&task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN];

		/*
		 * Can't test curr->present, we may need to clear a descriptor
		 * which had a value.
		 */
		if (curr->flushed)
			continue;
		ret = 1;
		break;
	}
	return ret;
}

/*
 * On a newly forked process, the TLS descriptors haven't yet been flushed. So
 * we mark them as such and the first switch_to will do the job.
 */
void clear_flushed_tls(struct task_struct *task)
{
	int i;

	for (i = GDT_ENTRY_TLS_MIN; i < GDT_ENTRY_TLS_MAX; i++) {
		struct uml_tls_struct* curr =
			&task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN];

		/*
		 * Still correct to do this, if it wasn't present on the host
		 * it will remain as flushed as it was.
		 */
		if (!curr->present)
			continue;

		curr->flushed = 0;
	}
}

/*
 * In SKAS0 mode, currently, multiple guest threads sharing the same ->mm have
 * a common host process. So this is needed in SKAS0 too.
 *
 * However, if each thread had a different host process (and this was discussed
 * for SMP support) this won't be needed.
 *
 * And this will not need to be used when (and if) we add support to the host
 * SKAS patch.
 */

int arch_switch_tls(struct task_struct *to)
{
	if (!host_supports_tls)
		return 0;

	/*
	 * We have no need whatsoever to switch TLS for kernel threads; beyond
	 * that, that would also result in us calling os_set_thread_area with
	 * userspace_pid[cpu] == 0, which gives an error.
	 */
	if (likely(to->mm))
		return load_TLS(O_FORCE, to);

	return 0;
}

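/*
 * Record a descriptor in the task's tls_array; 'flushed' says whether the
 * host process already holds this value.
 */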
static int set_tls_entry(struct task_struct* task, struct user_desc *info,
			 int idx, int flushed)
{
	struct thread_struct *t = &task->thread;

	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
		return -EINVAL;

	t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].tls = *info;
	t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].present = 1;
	t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed = flushed;

	return 0;
}

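/*
 * Called on clone(): the new descriptor is read from the address in the
 * child's %esi register (the CLONE_SETTLS argument on i386) and recorded as
 * not yet flushed.
 */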
int arch_copy_tls(struct task_struct *new)
{
	struct user_desc info;
	int idx, ret = -EFAULT;

	if (copy_from_user(&info,
			   (void __user *) UPT_ESI(&new->thread.regs.regs),
			   sizeof(info)))
		goto out;

	ret = -EINVAL;
	if (LDT_empty(&info))
		goto out;

	idx = info.entry_number;

	ret = set_tls_entry(new, &info, idx, 0);
out:
	return ret;
}

/* XXX: use do_get_thread_area to read the host value? I'm not at all sure! */
static int get_tls_entry(struct task_struct *task, struct user_desc *info,
			 int idx)
{
	struct thread_struct *t = &task->thread;

	if (!t->arch.tls_array)
		goto clear;

	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
		return -EINVAL;

	if (!t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].present)
		goto clear;

	*info = t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].tls;

out:
	/*
	 * Temporary debugging check, to make sure that things have been
	 * flushed. This could be triggered if load_TLS() failed.
	 */
	if (unlikely(task == current &&
		     !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
		printk(KERN_ERR "get_tls_entry: task with pid %d got here "
		       "without flushed TLS.\n", current->pid);
	}

	return 0;
clear:
	/*
	 * When the TLS entry has not been set, the values read back to
	 * userspace from the tls_array are 0 (because it's cleared at boot,
	 * see arch/i386/kernel/head.S:cpu_gdt_table). Emulate that.
	 */
	clear_user_desc(info);
	info->entry_number = idx;
	goto out;
}

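/*
 * The set_thread_area system call: if the caller passes entry_number == -1,
 * pick a free GDT slot and report it back, then push the descriptor to the
 * host and record it as flushed.
 */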
int sys_set_thread_area(struct user_desc __user *user_desc)
{
	struct user_desc info;
	int idx, ret;

	if (!host_supports_tls)
		return -ENOSYS;

	if (copy_from_user(&info, user_desc, sizeof(info)))
		return -EFAULT;

	idx = info.entry_number;

	if (idx == -1) {
		idx = get_free_idx(current);
		if (idx < 0)
			return idx;
		info.entry_number = idx;
		/* Tell the user which slot we chose for him. */
		if (put_user(idx, &user_desc->entry_number))
			return -EFAULT;
	}

	ret = do_set_thread_area(&info);
	if (ret)
		return ret;
	return set_tls_entry(current, &info, idx, 1);
}
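
/*
 * Illustrative only (not part of the original file): a minimal userspace
 * sketch of how the entry_number == -1 protocol above is typically driven.
 * It assumes glibc's syscall() wrapper and struct user_desc from <asm/ldt.h>;
 * the helper name example_set_tls is made up for this sketch.
 */
#if 0
#include <asm/ldt.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int example_set_tls(void *base)
{
	struct user_desc desc;

	memset(&desc, 0, sizeof(desc));
	desc.entry_number = -1;		/* ask the kernel to pick a free slot */
	desc.base_addr = (unsigned long) base;
	desc.limit = 0xfffff;
	desc.seg_32bit = 1;
	desc.limit_in_pages = 1;
	desc.useable = 1;

	if (syscall(SYS_set_thread_area, &desc) != 0)
		return -1;

	/* On success the kernel wrote the chosen GDT index back here. */
	return desc.entry_number;
}
#endif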

/*
 * Perform set_thread_area on behalf of the traced child.
 * Note: error handling is not done on the deferred load, and this differs
 * from i386. However, the only possible errors are caused by bugs.
 */
int ptrace_set_thread_area(struct task_struct *child, int idx,
			   struct user_desc __user *user_desc)
{
	struct user_desc info;

	if (!host_supports_tls)
		return -EIO;

	if (copy_from_user(&info, user_desc, sizeof(info)))
		return -EFAULT;

	return set_tls_entry(child, &info, idx, 0);
}

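/*
 * The get_thread_area system call: fetch the descriptor for the requested
 * GDT slot from the task's tls_array and copy it out to userspace.
 */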
int sys_get_thread_area(struct user_desc __user *user_desc)
{
	struct user_desc info;
	int idx, ret;

	if (!host_supports_tls)
		return -ENOSYS;

	if (get_user(idx, &user_desc->entry_number))
		return -EFAULT;

	ret = get_tls_entry(current, &info, idx);
	if (ret < 0)
		goto out;

	if (copy_to_user(user_desc, &info, sizeof(info)))
		ret = -EFAULT;

out:
	return ret;
}

/*
 * Perform get_thread_area on behalf of the traced child.
 */
int ptrace_get_thread_area(struct task_struct *child, int idx,
			   struct user_desc __user *user_desc)
{
	struct user_desc info;
	int ret;

	if (!host_supports_tls)
		return -EIO;

	ret = get_tls_entry(child, &info, idx);
	if (ret < 0)
		goto out;

	if (copy_to_user(user_desc, &info, sizeof(info)))
		ret = -EFAULT;
out:
	return ret;
}

/*
 * This code is really i386-only, but it detects and logs x86_64 GDT indexes
 * if a 32-bit UML is running on a 64-bit host.
 */
static int __init __setup_host_supports_tls(void)
{
	check_host_supports_tls(&host_supports_tls, &host_gdt_entry_tls_min);
	if (host_supports_tls) {
		printk(KERN_INFO "Host TLS support detected\n");
		printk(KERN_INFO "Detected host type: ");
		switch (host_gdt_entry_tls_min) {
		case GDT_ENTRY_TLS_MIN_I386:
			printk(KERN_CONT "i386");
			break;
		case GDT_ENTRY_TLS_MIN_X86_64:
			printk(KERN_CONT "x86_64");
			break;
		}
		printk(KERN_CONT " (GDT indexes %d to %d)\n",
		       host_gdt_entry_tls_min,
		       host_gdt_entry_tls_min + GDT_ENTRY_TLS_ENTRIES);
	} else
		printk(KERN_ERR " Host TLS support NOT detected! "
		       "TLS support inside UML will not work\n");
	return 0;
}

__initcall(__setup_host_supports_tls);