GitHub Repository: torvalds/linux
Path: blob/master/arch/um/kernel/process.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015 Anton Ivanov (aivanov@{brocade.com,kot-begemot.co.uk})
 * Copyright (C) 2015 Thomas Meyer ([email protected])
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Copyright 2003 PathScale, Inc.
 */

#include <linux/stddef.h>
#include <linux/err.h>
#include <linux/hardirq.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/proc_fs.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/seq_file.h>
#include <linux/tick.h>
#include <linux/threads.h>
#include <linux/resume_user_mode.h>
#include <asm/current.h>
#include <asm/mmu_context.h>
#include <asm/switch_to.h>
#include <asm/exec.h>
#include <linux/uaccess.h>
#include <as-layout.h>
#include <kern_util.h>
#include <os.h>
#include <skas.h>
#include <registers.h>
#include <linux/time-internal.h>
#include <linux/elfcore.h>

/*
 * This is a per-cpu array. A processor only modifies its entry and it only
 * cares about its entry, so it's OK if another processor is modifying its
 * entry.
 */
struct task_struct *cpu_tasks[NR_CPUS] = {
	[0 ... NR_CPUS - 1] = &init_task,
};
EXPORT_SYMBOL(cpu_tasks);

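/* Free the pages backing a kernel stack allocated with alloc_stack(). */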
void free_stack(unsigned long stack, int order)
{
	free_pages(stack, order);
}

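/*
 * Allocate 2^order pages for a kernel stack.  GFP_ATOMIC is used when the
 * caller is in a context that cannot sleep.
 */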
unsigned long alloc_stack(int order, int atomic)
{
	unsigned long page;
	gfp_t flags = GFP_KERNEL;

	if (atomic)
		flags = GFP_ATOMIC;
	page = __get_free_pages(flags, order);

	return page;
}

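/* Record @task as the task currently running on its CPU in cpu_tasks[]. */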
static inline void set_current(struct task_struct *task)
{
	cpu_tasks[task_thread_info(task)->cpu] = task;
}

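/*
 * Switch execution from @from to @to.  A UML thread's register state lives
 * in its jmp_buf (thread.switch_buf), so switch_threads() performs the
 * actual switch by longjmp-ing into the new thread.  When a thread is later
 * resumed here, it returns the task that ran just before it (prev_sched).
 */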
struct task_struct *__switch_to(struct task_struct *from, struct task_struct *to)
{
	to->thread.prev_sched = from;
	set_current(to);

	switch_threads(&from->thread.switch_buf, &to->thread.switch_buf);
	arch_switch_to(current);

	return current->thread.prev_sched;
}

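/*
 * Handle any work pending on the current thread (reschedule requests,
 * signals, notify-resume callbacks) before returning to userspace.
 */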
void interrupt_end(void)
{
	struct pt_regs *regs = &current->thread.regs;
	unsigned long thread_flags;

	thread_flags = read_thread_flags();
	while (thread_flags & _TIF_WORK_MASK) {
		if (thread_flags & _TIF_NEED_RESCHED)
			schedule();
		if (thread_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
			do_signal(regs);
		if (thread_flags & _TIF_NOTIFY_RESUME)
			resume_user_mode_work(regs);
		thread_flags = read_thread_flags();
	}
}

int get_current_pid(void)
{
	return task_pid_nr(current);
}

/*
 * This is called magically, by its address being stuffed in a jmp_buf
 * and being longjmp-d to.
 */
void new_thread_handler(void)
{
	int (*fn)(void *);
	void *arg;

	if (current->thread.prev_sched != NULL)
		schedule_tail(current->thread.prev_sched);
	current->thread.prev_sched = NULL;

	fn = current->thread.request.thread.proc;
	arg = current->thread.request.thread.arg;

	/*
	 * callback returns only if the kernel thread execs a process
	 */
	fn(arg);
	userspace(&current->thread.regs.regs);
}

/* Called magically, see new_thread_handler above */
static void fork_handler(void)
{
	schedule_tail(current->thread.prev_sched);

	/*
	 * XXX: if interrupt_end() calls schedule, this call to
	 * arch_switch_to isn't needed. We could want to apply this to
	 * improve performance. -bb
	 */
	arch_switch_to(current);

	current->thread.prev_sched = NULL;

	userspace(&current->thread.regs.regs);
}

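/*
 * Set up the thread state of a newly created task.  For a userspace fork
 * (args->fn == NULL) the child gets a copy of the parent's registers, a
 * syscall return value of 0, and resumes through fork_handler().  For a
 * kernel thread, the requested function and argument are recorded and the
 * thread starts in new_thread_handler().
 */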
int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
	u64 clone_flags = args->flags;
	unsigned long sp = args->stack;
	unsigned long tls = args->tls;
	void (*handler)(void);
	int ret = 0;

	p->thread = (struct thread_struct) INIT_THREAD;

	if (!args->fn) {
		memcpy(&p->thread.regs.regs, current_pt_regs(),
		       sizeof(p->thread.regs.regs));
		PT_REGS_SET_SYSCALL_RETURN(&p->thread.regs, 0);
		if (sp != 0)
			REGS_SP(p->thread.regs.regs.gp) = sp;

		handler = fork_handler;

		arch_copy_thread(&current->thread.arch, &p->thread.arch);
	} else {
		get_safe_registers(p->thread.regs.regs.gp, p->thread.regs.regs.fp);
		p->thread.request.thread.proc = args->fn;
		p->thread.request.thread.arg = args->fn_arg;
		handler = new_thread_handler;
	}

	new_thread(task_stack_page(p), &p->thread.switch_buf, handler);

	if (!args->fn) {
		clear_flushed_tls(p);

		/*
		 * Set a new TLS for the child thread?
		 */
		if (clone_flags & CLONE_SETTLS)
			ret = arch_set_tls(p, tls);
	}

	return ret;
}

void initial_thread_cb(void (*proc)(void *), void *arg)
{
	initial_thread_cb_skas(proc, arg);
}

int arch_dup_task_struct(struct task_struct *dst,
			 struct task_struct *src)
{
	/* init_task is not dynamically sized (missing FPU state) */
	if (unlikely(src == &init_task)) {
		memcpy(dst, src, sizeof(init_task));
		memset((void *)dst + sizeof(init_task), 0,
		       arch_task_struct_size - sizeof(init_task));
	} else {
		memcpy(dst, src, arch_task_struct_size);
	}

	return 0;
}

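/*
 * Idle the (virtual) CPU: via the time-travel subsystem when it is enabled,
 * otherwise by sleeping in the host process.
 */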
void um_idle_sleep(void)
{
	if (time_travel_mode != TT_MODE_OFF)
		time_travel_sleep();
	else
		os_idle_sleep();
}

void arch_cpu_idle(void)
{
	um_idle_sleep();
}

void arch_cpu_idle_prepare(void)
{
	os_idle_prepare();
}

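/* Non-zero if the current context must not sleep (atomic, IRQs off, ...). */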
int __uml_cant_sleep(void)
{
	return in_atomic() || irqs_disabled() || in_interrupt();
	/* Is in_interrupt() really needed? */
}

int uml_need_resched(void)
{
	return need_resched();
}

extern exitcall_t __uml_exitcall_begin, __uml_exitcall_end;

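/* Run the registered UML exitcalls, walking the section from the end. */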
void do_uml_exitcalls(void)
{
	exitcall_t *call;

	call = &__uml_exitcall_end;
	while (--call >= &__uml_exitcall_begin)
		(*call)();
}

char *uml_strdup(const char *string)
{
	return kstrdup(string, GFP_KERNEL);
}
EXPORT_SYMBOL(uml_strdup);

int copy_from_user_proc(void *to, void __user *from, int size)
{
	return copy_from_user(to, from, size);
}

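/* Non-zero when the current task is being single-stepped (TIF_SINGLESTEP). */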
int singlestepping(void)
{
	return test_thread_flag(TIF_SINGLESTEP);
}

/*
 * Only x86 and x86_64 have an arch_align_stack().
 * All other arches have "#define arch_align_stack(x) (x)"
 * in their asm/exec.h
 * As this is included in UML from asm-um/system-generic.h,
 * we can use it to behave as the subarch does.
 */
#ifndef arch_align_stack
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_u32_below(8192);
	return sp & ~0xf;
}
#endif

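/*
 * Find the "waiting channel": scan the task's kernel stack for the first
 * kernel text address above the scheduler functions, i.e. the caller the
 * task is blocked in.
 */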
unsigned long __get_wchan(struct task_struct *p)
{
	unsigned long stack_page, sp, ip;
	bool seen_sched = 0;

	stack_page = (unsigned long) task_stack_page(p);
	/* Bail if the process has no kernel stack for some reason */
	if (stack_page == 0)
		return 0;

	sp = p->thread.switch_buf->JB_SP;
	/*
	 * Bail if the stack pointer is below the bottom of the kernel
	 * stack for some reason
	 */
	if (sp < stack_page)
		return 0;

	while (sp < stack_page + THREAD_SIZE) {
		ip = *((unsigned long *) sp);
		if (in_sched_functions(ip))
			/* Ignore everything until we're above the scheduler */
			seen_sched = 1;
		else if (kernel_text_address(ip) && seen_sched)
			return ip;

		sp += sizeof(unsigned long);
	}

	return 0;
}