GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/compat/linuxkpi/common/src/linux_current.c
/*-
 * Copyright (c) 2017 Hans Petter Selasky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#ifdef __amd64__
#define	DEV_APIC
#elif defined(__i386__)
#include "opt_apic.h"
#endif

#include <linux/compat.h>
#include <linux/completion.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/moduleparam.h>

#include <sys/kernel.h>
#include <sys/eventhandler.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <vm/uma.h>

#ifdef DEV_APIC
extern u_int first_msi_irq, num_msi_irqs;
#endif

static eventhandler_tag linuxkpi_thread_dtor_tag;

static uma_zone_t linux_current_zone;
static uma_zone_t linux_mm_zone;

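/*
 * Locking/refcount contract for find_other_mm() below: the process lock
 * must be held by the caller, and the returned mm_struct, if any, has
 * had its user count bumped with atomic_inc_not_zero(), so an mm_struct
 * whose last user reference is already gone is never resurrected.
 */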
/* check if another thread already has a mm_struct */
static struct mm_struct *
find_other_mm(struct proc *p)
{
	struct thread *td;
	struct task_struct *ts;
	struct mm_struct *mm;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	FOREACH_THREAD_IN_PROC(p, td) {
		ts = td->td_lkpi_task;
		if (ts == NULL)
			continue;
		mm = ts->mm;
		if (mm == NULL)
			continue;
		/* try to share other mm_struct */
		if (atomic_inc_not_zero(&mm->mm_users))
			return (mm);
	}
	return (NULL);
}

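/*
 * Lazily create the Linux task_struct shadowing a FreeBSD thread.
 * Interrupt threads and other non-sleepable contexts have M_WAITOK
 * stripped and allocate M_NOWAIT | M_USE_RESERVE from the preallocated
 * UMA reserve (see the compat.linuxkpi.task_struct_reserve tunable
 * below).  The mm_struct is shared with sibling threads when possible;
 * note the second find_other_mm() pass, which is needed because the
 * process lock is dropped around the mm_struct allocation and another
 * thread may have installed one in the meantime.
 */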
int
linux_alloc_current(struct thread *td, int flags)
{
	struct proc *proc;
	struct task_struct *ts;
	struct mm_struct *mm, *mm_other;

	MPASS(td->td_lkpi_task == NULL);

	if ((td->td_pflags & TDP_ITHREAD) != 0 || !THREAD_CAN_SLEEP()) {
		flags &= ~M_WAITOK;
		flags |= M_NOWAIT | M_USE_RESERVE;
	}

	ts = uma_zalloc(linux_current_zone, flags | M_ZERO);
	if (ts == NULL) {
		if ((flags & (M_WAITOK | M_NOWAIT)) == M_WAITOK)
			panic("linux_alloc_current: failed to allocate task");
		return (ENOMEM);
	}
	mm = NULL;

	/* setup new task structure */
	atomic_set(&ts->kthread_flags, 0);
	ts->task_thread = td;
	ts->comm = td->td_name;
	ts->pid = td->td_tid;
	ts->group_leader = ts;
	atomic_set(&ts->usage, 1);
	atomic_set(&ts->state, TASK_RUNNING);
	init_completion(&ts->parked);
	init_completion(&ts->exited);

	proc = td->td_proc;

	PROC_LOCK(proc);
	mm_other = find_other_mm(proc);

	/* use allocated mm_struct as a fallback */
	if (mm_other == NULL) {
		PROC_UNLOCK(proc);
		mm = uma_zalloc(linux_mm_zone, flags | M_ZERO);
		if (mm == NULL) {
			if ((flags & (M_WAITOK | M_NOWAIT)) == M_WAITOK)
				panic(
				    "linux_alloc_current: failed to allocate mm");
			/* free the task_struct, not the NULL mm pointer */
			uma_zfree(linux_current_zone, ts);
			return (ENOMEM);
		}

		PROC_LOCK(proc);
		mm_other = find_other_mm(proc);
		if (mm_other == NULL) {
			/* setup new mm_struct */
			init_rwsem(&mm->mmap_sem);
			atomic_set(&mm->mm_count, 1);
			atomic_set(&mm->mm_users, 1);
			/* set mm_struct pointer */
			ts->mm = mm;
			/* clear pointer to not free memory */
			mm = NULL;
		} else {
			ts->mm = mm_other;
		}
	} else {
		ts->mm = mm_other;
	}

	/* store pointer to task struct */
	td->td_lkpi_task = ts;
	PROC_UNLOCK(proc);

	/* free mm_struct pointer, if any */
	uma_zfree(linux_mm_zone, mm);

	return (0);
}

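/*
 * Return the task's mm_struct with an extra user reference taken, or
 * NULL if there is none.  A minimal illustrative use (hypothetical
 * caller, not from this file):
 *
 *	mm = linux_get_task_mm(task);
 *	if (mm != NULL) {
 *		...
 *		mmput(mm);
 *	}
 */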
struct mm_struct *
linux_get_task_mm(struct task_struct *task)
{
	struct mm_struct *mm;

	mm = task->mm;
	if (mm != NULL) {
		atomic_inc(&mm->mm_users);
		return (mm);
	}
	return (NULL);
}

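/*
 * Hand an mm_struct back to its UMA zone.  In the LinuxKPI this is the
 * teardown path reached through mmput()/mmdrop() once the final
 * reference on the structure goes away (cf. linux_free_current()
 * below).
 */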
void
linux_mm_dtor(struct mm_struct *mm)
{
	uma_zfree(linux_mm_zone, mm);
}

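/*
 * Release a task_struct: drop its mm_struct user reference (mmput()
 * frees the mm_struct when this was the last user) and return the task
 * to its UMA zone.
 */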
void
linux_free_current(struct task_struct *ts)
{
	mmput(ts->mm);
	uma_zfree(linux_current_zone, ts);
}

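/*
 * thread_dtor eventhandler: when a FreeBSD thread is destroyed, detach
 * and release its shadow task_struct, if one was ever allocated.
 */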
static void
linuxkpi_thread_dtor(void *arg __unused, struct thread *td)
{
	struct task_struct *ts;

	ts = td->td_lkpi_task;
	if (ts == NULL)
		return;

	td->td_lkpi_task = NULL;
	put_task_struct(ts);
}

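/*
 * Look up a task_struct by Linux PID.  Values above PID_MAX are FreeBSD
 * thread IDs (see "ts->pid = td->td_tid" above) and are resolved with
 * tdfind(); values in the process-ID range are resolved with pfind(),
 * returning the first thread that carries a task_struct.  Both lookups
 * return with the process lock held, so it is dropped before returning.
 */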
static struct task_struct *
linux_get_pid_task_int(pid_t pid, const bool do_get)
{
	struct thread *td;
	struct proc *p;
	struct task_struct *ts;

	if (pid > PID_MAX) {
		/* try to find corresponding thread */
		td = tdfind(pid, -1);
		if (td != NULL) {
			ts = td->td_lkpi_task;
			if (do_get && ts != NULL)
				get_task_struct(ts);
			PROC_UNLOCK(td->td_proc);
			return (ts);
		}
	} else {
		/* try to find corresponding process */
		p = pfind(pid);
		if (p != NULL) {
			FOREACH_THREAD_IN_PROC(p, td) {
				ts = td->td_lkpi_task;
				if (ts != NULL) {
					if (do_get)
						get_task_struct(ts);
					PROC_UNLOCK(p);
					return (ts);
				}
			}
			PROC_UNLOCK(p);
		}
	}
	return (NULL);
}

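/*
 * Wrappers: linux_pid_task() returns a borrowed pointer, while
 * linux_get_pid_task() also takes a reference which the caller is
 * expected to drop, e.g. (illustrative, not from this file):
 *
 *	ts = linux_get_pid_task(pid);
 *	if (ts != NULL) {
 *		...
 *		put_task_struct(ts);
 *	}
 */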
struct task_struct *
linux_pid_task(pid_t pid)
{
	return (linux_get_pid_task_int(pid, false));
}

struct task_struct *
linux_get_pid_task(pid_t pid)
{
	return (linux_get_pid_task_int(pid, true));
}

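/*
 * A task is considered exiting once its process has P_WEXIT set.  The
 * thread-then-process lookup parallels linux_get_pid_task_int() above;
 * both tdfind() and pfind() return with the process lock held.
 */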
bool
linux_task_exiting(struct task_struct *task)
{
	struct thread *td;
	struct proc *p;
	bool ret;

	ret = false;

	/* try to find corresponding thread */
	td = tdfind(task->pid, -1);
	if (td != NULL) {
		p = td->td_proc;
	} else {
		/* try to find corresponding process */
		p = pfind(task->pid);
	}

	if (p != NULL) {
		if ((p->p_flag & P_WEXIT) != 0)
			ret = true;
		PROC_UNLOCK(p);
	}
	return (ret);
}

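/*
 * The reserve below backs the M_NOWAIT | M_USE_RESERVE allocations made
 * by linux_alloc_current() from contexts that must not sleep.
 */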
static int lkpi_task_resrv;
SYSCTL_INT(_compat_linuxkpi, OID_AUTO, task_struct_reserve,
    CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &lkpi_task_resrv, 0,
    "Number of struct task and struct mm to reserve for non-sleepable "
    "allocations");

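/*
 * Size the reserves, create the UMA zones, and only then publish the
 * allocator: the seq_cst fence orders the zone setup before the
 * lkpi_alloc_current hook becomes visible.  With DEV_APIC the default
 * reserve is sized from the potential number of interrupt threads,
 * since those are the callers that cannot sleep and must dip into the
 * reserve.
 */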
static void
linux_current_init(void *arg __unused)
{
	TUNABLE_INT_FETCH("compat.linuxkpi.task_struct_reserve",
	    &lkpi_task_resrv);
	if (lkpi_task_resrv == 0) {
#ifdef DEV_APIC
		/*
		 * Number of interrupt threads plus per-cpu callout
		 * SWI threads.
		 */
		lkpi_task_resrv = first_msi_irq + num_msi_irqs + MAXCPU;
#else
		lkpi_task_resrv = 1024;		/* XXXKIB arbitrary */
#endif
	}
	linux_current_zone = uma_zcreate("lkpicurr",
	    sizeof(struct task_struct), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
	uma_zone_reserve(linux_current_zone, lkpi_task_resrv);
	uma_prealloc(linux_current_zone, lkpi_task_resrv);
	linux_mm_zone = uma_zcreate("lkpimm",
	    sizeof(struct mm_struct), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
	uma_zone_reserve(linux_mm_zone, lkpi_task_resrv);
	uma_prealloc(linux_mm_zone, lkpi_task_resrv);

	atomic_thread_fence_seq_cst();

	linuxkpi_thread_dtor_tag = EVENTHANDLER_REGISTER(thread_dtor,
	    linuxkpi_thread_dtor, NULL, EVENTHANDLER_PRI_ANY);
	lkpi_alloc_current = linux_alloc_current;
}
SYSINIT(linux_current, SI_SUB_EVENTHANDLER + 1, SI_ORDER_SECOND,
    linux_current_init, NULL);

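/*
 * Module teardown runs the init steps in reverse: repoint
 * lkpi_alloc_current at the no-op allocator, strip the shadow
 * task_struct from every thread in the system, let half-dead threads
 * pass through linuxkpi_thread_dtor() via thread_reap_barrier(), and
 * only then deregister the eventhandler and destroy the zones.
 */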
static void
linux_current_uninit(void *arg __unused)
{
	struct proc *p;
	struct task_struct *ts;
	struct thread *td;

	lkpi_alloc_current = linux_alloc_current_noop;

	atomic_thread_fence_seq_cst();

	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		PROC_LOCK(p);
		FOREACH_THREAD_IN_PROC(p, td) {
			if ((ts = td->td_lkpi_task) != NULL) {
				td->td_lkpi_task = NULL;
				put_task_struct(ts);
			}
		}
		PROC_UNLOCK(p);
	}
	sx_sunlock(&allproc_lock);

	thread_reap_barrier();

	EVENTHANDLER_DEREGISTER(thread_dtor, linuxkpi_thread_dtor_tag);

	uma_zdestroy(linux_current_zone);
	uma_zdestroy(linux_mm_zone);
}
SYSUNINIT(linux_current, SI_SUB_EVENTHANDLER + 1, SI_ORDER_SECOND,
    linux_current_uninit, NULL);