GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/compat/linuxkpi/common/src/linux_lock.c
/*-
 * Copyright (c) 2017 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/queue.h>

#include <linux/sched.h>
#include <linux/ww_mutex.h>

struct ww_mutex_thread {
        TAILQ_ENTRY(ww_mutex_thread) entry;
        struct thread *thread;
        struct ww_mutex *lock;
};

static TAILQ_HEAD(, ww_mutex_thread) ww_mutex_head;
static struct mtx ww_mutex_global;

static void
linux_ww_init(void *arg)
{
        TAILQ_INIT(&ww_mutex_head);
        mtx_init(&ww_mutex_global, "lkpi-ww-mtx", NULL, MTX_DEF);
}

SYSINIT(ww_init, SI_SUB_LOCK, SI_ORDER_SECOND, linux_ww_init, NULL);

static void
linux_ww_uninit(void *arg)
{
        mtx_destroy(&ww_mutex_global);
}

SYSUNINIT(ww_uninit, SI_SUB_LOCK, SI_ORDER_SECOND, linux_ww_uninit, NULL);

static inline void
linux_ww_lock(void)
{
        mtx_lock(&ww_mutex_global);
}

static inline void
linux_ww_unlock(void)
{
        mtx_unlock(&ww_mutex_global);
}

/* lock a mutex with deadlock avoidance */
int
linux_ww_mutex_lock_sub(struct ww_mutex *lock,
    struct ww_acquire_ctx *ctx, int catch_signal)
{
        struct task_struct *task;
        struct ww_mutex_thread entry;
        struct ww_mutex_thread *other;
        int retval = 0;

        task = current;

        linux_ww_lock();
        if (unlikely(sx_try_xlock(&lock->base.sx) == 0)) {
                entry.thread = curthread;
                entry.lock = lock;
                TAILQ_INSERT_TAIL(&ww_mutex_head, &entry, entry);

                do {
                        struct thread *owner = (struct thread *)
                            SX_OWNER(lock->base.sx.sx_lock);

                        /* scan for deadlock */
                        TAILQ_FOREACH(other, &ww_mutex_head, entry) {
                                /* skip own thread */
                                if (other == &entry)
                                        continue;
                                /*
                                 * If the thread owning the lock we are
                                 * waiting for is at the same time trying
                                 * to acquire a lock this thread owns,
                                 * we have a deadlock.
                                 */
                                if (other->thread == owner &&
                                    (struct thread *)SX_OWNER(
                                    other->lock->base.sx.sx_lock) == curthread) {
                                        retval = -EDEADLK;
                                        goto done;
                                }
                        }
                        if (catch_signal) {
                                retval = -cv_wait_sig(&lock->condvar, &ww_mutex_global);
                                if (retval != 0) {
                                        linux_schedule_save_interrupt_value(task, retval);
                                        retval = -EINTR;
                                        goto done;
                                }
                        } else {
                                cv_wait(&lock->condvar, &ww_mutex_global);
                        }
                } while (sx_try_xlock(&lock->base.sx) == 0);
done:
                TAILQ_REMOVE(&ww_mutex_head, &entry, entry);

                /* if the lock is free, wakeup next lock waiter, if any */
                if ((struct thread *)SX_OWNER(lock->base.sx.sx_lock) == NULL)
                        cv_signal(&lock->condvar);
        }

        if (retval == 0)
                lock->ctx = ctx;
        linux_ww_unlock();
        return (retval);
}

void
linux_ww_mutex_unlock_sub(struct ww_mutex *lock)
{
        /* protect ww_mutex ownership change */
        linux_ww_lock();
        lock->ctx = NULL;
        sx_xunlock(&lock->base.sx);
        /* wakeup a lock waiter, if any */
        cv_signal(&lock->condvar);
        linux_ww_unlock();
}
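
/*
 * Illustrative sketch, not part of the original file: one way a LinuxKPI
 * consumer could end up in linux_ww_mutex_lock_sub() and
 * linux_ww_mutex_unlock_sub() above.  It assumes the usual Linux wait/wound
 * mutex API from <linux/ww_mutex.h> (DEFINE_WW_CLASS, ww_acquire_init,
 * ww_mutex_lock, ww_mutex_unlock, ww_acquire_fini) is available and that
 * ww_mutex_lock() forwards here with catch_signal == 0, returning 0 on
 * success or -EDEADLK when the scan above finds a cycle.  All names below
 * (example_ww_class, lock_a, lock_b, example_lock_both) are hypothetical.
 */
#if 0	/* example only, not compiled */
static DEFINE_WW_CLASS(example_ww_class);
static struct ww_mutex lock_a, lock_b;	/* assume ww_mutex_init() was done */

static int
example_lock_both(void)
{
        struct ww_acquire_ctx ctx;
        int error;

retry:
        ww_acquire_init(&ctx, &example_ww_class);
        error = ww_mutex_lock(&lock_a, &ctx);
        if (error == 0) {
                error = ww_mutex_lock(&lock_b, &ctx);
                if (error == -EDEADLK) {
                        /* deadlock detected: drop what we hold and retry */
                        ww_mutex_unlock(&lock_a);
                        ww_acquire_fini(&ctx);
                        goto retry;
                }
                if (error == 0) {
                        /* both locks held; do the work, then release */
                        ww_mutex_unlock(&lock_b);
                        ww_mutex_unlock(&lock_a);
                } else {
                        ww_mutex_unlock(&lock_a);
                }
        }
        ww_acquire_fini(&ctx);
        return (error);
}
#endif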

int
linux_mutex_lock_interruptible(mutex_t *m)
{
        int error;

        error = -sx_xlock_sig(&m->sx);
        if (error != 0) {
                linux_schedule_save_interrupt_value(current, error);
                error = -EINTR;
        }
        return (error);
}
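
/*
 * Illustrative sketch, not part of the original file: the sign convention in
 * linux_mutex_lock_interruptible() above.  sx_xlock_sig() (see sx(9)) returns
 * a positive errno such as EINTR or ERESTART when the sleep is interrupted by
 * a signal; the wrapper negates it, records the original value via
 * linux_schedule_save_interrupt_value(), and reports Linux-style -EINTR.  The
 * sketch assumes mutex_lock_interruptible() and mutex_unlock() from
 * <linux/mutex.h> map onto this helper and onto sx_xunlock(); example_mutex
 * and example_locked_op() are hypothetical.
 */
#if 0	/* example only, not compiled */
static mutex_t example_mutex;	/* assume mutex_init() was done */

static int
example_locked_op(void)
{

        /* a non-zero return means a signal arrived while waiting */
        if (mutex_lock_interruptible(&example_mutex) != 0)
                return (-EINTR);
        /* ... critical section ... */
        mutex_unlock(&example_mutex);
        return (0);
}
#endif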

int
linux_down_read_killable(struct rw_semaphore *rw)
{
        int error;

        error = -sx_slock_sig(&rw->sx);
        if (error != 0) {
                linux_schedule_save_interrupt_value(current, error);
                error = -EINTR;
        }
        return (error);
}

int
linux_down_write_killable(struct rw_semaphore *rw)
{
        int error;

        error = -sx_xlock_sig(&rw->sx);
        if (error != 0) {
                linux_schedule_save_interrupt_value(current, error);
                error = -EINTR;
        }
        return (error);
}
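
/*
 * Illustrative sketch, not part of the original file: the killable
 * rw_semaphore wrappers above follow the same pattern as
 * linux_mutex_lock_interruptible(), negating the sx(9) return value into a
 * Linux-style negative errno, saving the original value, and reporting
 * -EINTR.  The sketch assumes down_read_killable() and up_read() from
 * <linux/rwsem.h> reach linux_down_read_killable() and sx_sunlock();
 * example_rwsem and example_reader() are hypothetical, and the write side
 * with down_write_killable()/up_write() is analogous.
 */
#if 0	/* example only, not compiled */
static struct rw_semaphore example_rwsem;	/* assume init_rwsem() was done */

static int
example_reader(void)
{

        if (down_read_killable(&example_rwsem) != 0)
                return (-EINTR);	/* interrupted by a signal */
        /* ... read-side critical section ... */
        up_read(&example_rwsem);
        return (0);
}
#endif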