GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/compat/linuxkpi/common/src/linux_kthread.c
/*-
 * Copyright (c) 2017 Hans Petter Selasky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
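
/*
 * LinuxKPI glue implementing the Linux kthread stop/park API on top of
 * native FreeBSD kernel threads.
 */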

#include <sys/cdefs.h>
#include <linux/compat.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/wait.h>

#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/priority.h>
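
/*
 * Flag bits kept in task_struct->kthread_flags, mirroring the stop and
 * park state of the Linux kthread API.
 */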
enum {
	KTHREAD_SHOULD_STOP_MASK = (1 << 0),
	KTHREAD_SHOULD_PARK_MASK = (1 << 1),
	KTHREAD_IS_PARKED_MASK = (1 << 2),
};
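
/*
 * Return true if kthread_stop() has been called for the given task.
 */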
bool
linux_kthread_should_stop_task(struct task_struct *task)
{

	return (atomic_read(&task->kthread_flags) & KTHREAD_SHOULD_STOP_MASK);
}
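
/*
 * Same as linux_kthread_should_stop_task(), but for the calling thread.
 */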
bool
linux_kthread_should_stop(void)
{

	return (atomic_read(&current->kthread_flags) & KTHREAD_SHOULD_STOP_MASK);
}
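
/*
 * Tell the given kernel thread to stop, unpark and wake it, wait until
 * it has exited and return its exit code.  The caller's reference on
 * the task structure is dropped here.
 */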
int
linux_kthread_stop(struct task_struct *task)
{
	int retval;

	/*
	 * Assume task is still alive else caller should not call
	 * kthread_stop():
	 */
	atomic_or(KTHREAD_SHOULD_STOP_MASK, &task->kthread_flags);
	kthread_unpark(task);
	wake_up_process(task);
	wait_for_completion(&task->exited);

	/*
	 * Get return code and free task structure:
	 */
	retval = task->task_ret;
	put_task_struct(task);

	return (retval);
}
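
/*
 * Ask the given kernel thread to park itself and wait until it has
 * done so.
 */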
int
linux_kthread_park(struct task_struct *task)
{

	atomic_or(KTHREAD_SHOULD_PARK_MASK, &task->kthread_flags);
	wake_up_process(task);
	wait_for_completion(&task->parked);
	return (0);
}
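
/*
 * Park the calling kernel thread.  The first pass through the loop
 * signals &task->parked so that kthread_park() can return; the thread
 * then sleeps in TASK_PARKED state until the park request is cleared
 * by kthread_unpark() or kthread_stop().
 */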
void
linux_kthread_parkme(void)
{
	struct task_struct *task;

	task = current;
	set_task_state(task, TASK_PARKED | TASK_UNINTERRUPTIBLE);
	while (linux_kthread_should_park()) {
		while ((atomic_fetch_or(KTHREAD_IS_PARKED_MASK,
		    &task->kthread_flags) & KTHREAD_IS_PARKED_MASK) == 0)
			complete(&task->parked);
		schedule();
		set_task_state(task, TASK_PARKED | TASK_UNINTERRUPTIBLE);
	}
	atomic_andnot(KTHREAD_IS_PARKED_MASK, &task->kthread_flags);
	set_task_state(task, TASK_RUNNING);
}
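
/*
 * Return true if kthread_park() has been called for the calling thread.
 */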
bool
linux_kthread_should_park(void)
{
	struct task_struct *task;

	task = current;
	return (atomic_read(&task->kthread_flags) & KTHREAD_SHOULD_PARK_MASK);
}
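
/*
 * Clear a pending park request and, if the thread had actually parked
 * itself, wake it up again.
 */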
void
linux_kthread_unpark(struct task_struct *task)
{

	atomic_andnot(KTHREAD_SHOULD_PARK_MASK, &task->kthread_flags);
	if ((atomic_fetch_andnot(KTHREAD_IS_PARKED_MASK, &task->kthread_flags) &
	    KTHREAD_IS_PARKED_MASK) != 0)
		wake_up_state(task, TASK_PARKED);
}
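
/*
 * Hook up the LinuxKPI task state of the given FreeBSD thread, record
 * the thread function and its argument, raise the scheduling priority
 * and place the thread on a run queue.
 */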
struct task_struct *
linux_kthread_setup_and_run(struct thread *td, linux_task_fn_t *task_fn, void *arg)
{
	struct task_struct *task;

	linux_set_current(td);

	task = td->td_lkpi_task;
	task->task_fn = task_fn;
	task->task_data = arg;

	thread_lock(td);
	/* make sure the scheduler priority is raised */
	sched_prio(td, PI_SWI(SWI_NET));
	/* put thread into run-queue */
	sched_add(td, SRQ_BORING);

	return (task);
}
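
/*
 * Entry point of a LinuxKPI kernel thread: run the supplied thread
 * function unless a stop was already requested, then hand the task
 * structure back to kthread_stop(), if any, and exit.
 */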
void
linux_kthread_fn(void *arg __unused)
{
	struct task_struct *task = current;

	if (linux_kthread_should_stop_task(task) == 0)
		task->task_ret = task->task_fn(task->task_data);

	if (linux_kthread_should_stop_task(task) != 0) {
		struct thread *td = curthread;

		/* let kthread_stop() free data */
		td->td_lkpi_task = NULL;

		/* wakeup kthread_stop() */
		complete(&task->exited);
	}
	kthread_exit();
}
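
/*
 * Glue callback (taskqueue(9) handler signature) that executes one
 * queued kthread_work item.
 */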
void
lkpi_kthread_work_fn(void *context, int pending __unused)
{
	struct kthread_work *work = context;

	work->func(work);
}
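
/*
 * Glue callback that runs in the context of the worker thread and
 * records its task_struct in the kthread_worker.
 */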
void
lkpi_kthread_worker_init_fn(void *context, int pending __unused)
{
	struct kthread_worker *worker = context;

	worker->task = current;
}