GitHub Repository: torvalds/linux
Path: blob/master/ipc/namespace.c
// SPDX-License-Identifier: GPL-2.0
/*
 * linux/ipc/namespace.c
 * Copyright (C) 2006 Pavel Emelyanov <[email protected]> OpenVZ, SWsoft Inc.
 */

#include <linux/ipc.h>
#include <linux/msg.h>
#include <linux/ipc_namespace.h>
#include <linux/rcupdate.h>
#include <linux/nsproxy.h>
#include <linux/slab.h>
#include <linux/cred.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/user_namespace.h>
#include <linux/proc_ns.h>
#include <linux/sched/task.h>

#include "util.h"

/*
 * The work queue is used to avoid the cost of synchronize_rcu in kern_unmount.
 */
static void free_ipc(struct work_struct *unused);
static DECLARE_WORK(free_ipc_work, free_ipc);

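/*
 * Charge or uncharge one ipc namespace against the UCOUNT_IPC_NAMESPACES
 * limit of the owning user namespace, attributed to the current euid.
 */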
static struct ucounts *inc_ipc_namespaces(struct user_namespace *ns)
{
	return inc_ucount(ns, current_euid(), UCOUNT_IPC_NAMESPACES);
}

static void dec_ipc_namespaces(struct ucounts *ucounts)
{
	dec_ucount(ucounts, UCOUNT_IPC_NAMESPACES);
}

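/*
 * create_ipc_ns - allocate and set up a new ipc namespace
 * @user_ns: user namespace that will own the new ipc namespace
 * @old_ns: the namespace being copied (not otherwise used here)
 *
 * Charges the namespace against the UCOUNT_IPC_NAMESPACES limit, then
 * initializes the mqueue, msg, sem and shm state and the per-namespace
 * sysctls. Returns the new namespace or an ERR_PTR() on failure.
 */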
static struct ipc_namespace *create_ipc_ns(struct user_namespace *user_ns,
					   struct ipc_namespace *old_ns)
{
	struct ipc_namespace *ns;
	struct ucounts *ucounts;
	int err;

	err = -ENOSPC;
again:
	ucounts = inc_ipc_namespaces(user_ns);
	if (!ucounts) {
		/*
		 * IPC namespaces are freed asynchronously, by free_ipc_work.
		 * If frees were pending, flush_work will wait, and
		 * return true. Fail the allocation if no frees are pending.
		 */
		if (flush_work(&free_ipc_work))
			goto again;
		goto fail;
	}

	err = -ENOMEM;
	ns = kzalloc(sizeof(struct ipc_namespace), GFP_KERNEL_ACCOUNT);
	if (ns == NULL)
		goto fail_dec;

	err = ns_alloc_inum(&ns->ns);
	if (err)
		goto fail_free;
	ns->ns.ops = &ipcns_operations;

	refcount_set(&ns->ns.count, 1);
	ns->user_ns = get_user_ns(user_ns);
	ns->ucounts = ucounts;

	err = mq_init_ns(ns);
	if (err)
		goto fail_put;

	err = -ENOMEM;
	if (!setup_mq_sysctls(ns))
		goto fail_put;

	if (!setup_ipc_sysctls(ns))
		goto fail_mq;

	err = msg_init_ns(ns);
	if (err)
		goto fail_ipc;

	sem_init_ns(ns);
	shm_init_ns(ns);

	return ns;

fail_ipc:
	retire_ipc_sysctls(ns);
fail_mq:
	retire_mq_sysctls(ns);

fail_put:
	put_user_ns(ns->user_ns);
	ns_free_inum(&ns->ns);
fail_free:
	kfree(ns);
fail_dec:
	dec_ipc_namespaces(ucounts);
fail:
	return ERR_PTR(err);
}

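/*
 * copy_ipcs - get or create the ipc namespace for a clone/unshare
 * @flags: clone flags
 * @user_ns: user namespace that would own a newly created ipc namespace
 * @ns: the ipc namespace of the copying task
 *
 * Without CLONE_NEWIPC this just takes another reference on the existing
 * namespace; with it, a fresh namespace is created via create_ipc_ns().
 */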
struct ipc_namespace *copy_ipcs(unsigned long flags,
	struct user_namespace *user_ns, struct ipc_namespace *ns)
{
	if (!(flags & CLONE_NEWIPC))
		return get_ipc_ns(ns);
	return create_ipc_ns(user_ns, ns);
}

/*
 * free_ipcs - free all ipcs of one type
 * @ns: the namespace to remove the ipcs from
 * @ids: the table of ipcs to free
 * @free: the function called to free each individual ipc
 *
 * Called for each kind of ipc when an ipc_namespace exits.
 */
void free_ipcs(struct ipc_namespace *ns, struct ipc_ids *ids,
	       void (*free)(struct ipc_namespace *, struct kern_ipc_perm *))
{
	struct kern_ipc_perm *perm;
	int next_id;
	int total, in_use;

	down_write(&ids->rwsem);

	in_use = ids->in_use;

	for (total = 0, next_id = 0; total < in_use; next_id++) {
		perm = idr_find(&ids->ipcs_idr, next_id);
		if (perm == NULL)
			continue;
		rcu_read_lock();
		ipc_lock_object(perm);
		free(ns, perm);
		total++;
	}
	up_write(&ids->rwsem);
}

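/*
 * free_ipc_ns - tear down and free a dead ipc namespace
 * @ns: the namespace to destroy
 *
 * Drops the mqueue mount, releases the per-type ipc state and sysctl
 * tables, uncharges the ucount and the owning user namespace, then
 * frees @ns itself.
 */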
static void free_ipc_ns(struct ipc_namespace *ns)
{
	/*
	 * Caller needs to wait for an RCU grace period to have passed
	 * after making the mount point inaccessible to new accesses.
	 */
	mntput(ns->mq_mnt);
	sem_exit_ns(ns);
	msg_exit_ns(ns);
	shm_exit_ns(ns);

	retire_mq_sysctls(ns);
	retire_ipc_sysctls(ns);

	dec_ipc_namespaces(ns->ucounts);
	put_user_ns(ns->user_ns);
	ns_free_inum(&ns->ns);
	kfree(ns);
}

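/*
 * Namespaces queued on free_ipc_list by put_ipc_ns() are destroyed in a
 * batch: every mqueue mount is marked short-term first, one RCU grace
 * period covers the whole batch, and only then is each namespace freed.
 */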
static LLIST_HEAD(free_ipc_list);
static void free_ipc(struct work_struct *unused)
{
	struct llist_node *node = llist_del_all(&free_ipc_list);
	struct ipc_namespace *n, *t;

	llist_for_each_entry_safe(n, t, node, mnt_llist)
		mnt_make_shortterm(n->mq_mnt);

	/* Wait for any last users to have gone away. */
	synchronize_rcu();

	llist_for_each_entry_safe(n, t, node, mnt_llist)
		free_ipc_ns(n);
}

/*
 * put_ipc_ns - drop a reference to an ipc namespace.
 * @ns: the namespace to put
 *
 * If this is the last task in the namespace exiting, and
 * it is dropping the refcount to 0, then it can race with
 * a task in another ipc namespace but in a mounts namespace
 * which has this ipcns's mqueuefs mounted, doing some action
 * with one of the mqueuefs files. That can raise the refcount.
 * So dropping the refcount, and raising the refcount when
 * accessing it through the VFS, are protected with mq_lock.
 *
 * (Clearly, a task raising the refcount on its own ipc_ns
 * needn't take mq_lock since it can't race with the last task
 * in the ipcns exiting).
 */
void put_ipc_ns(struct ipc_namespace *ns)
{
	if (refcount_dec_and_lock(&ns->ns.count, &mq_lock)) {
		mq_clear_sbinfo(ns);
		spin_unlock(&mq_lock);

		if (llist_add(&ns->mnt_llist, &free_ipc_list))
			schedule_work(&free_ipc_work);
	}
}

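/*
 * The callbacks below implement ipcns_operations, which backs the ipc
 * entry under /proc/<pid>/ns: "get" pins a task's ipc namespace, "put"
 * drops that reference, "install" switches a namespace set over to the
 * target namespace (the setns() path), and "owner" reports the user
 * namespace that owns it.
 */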
static inline struct ipc_namespace *to_ipc_ns(struct ns_common *ns)
{
	return container_of(ns, struct ipc_namespace, ns);
}

static struct ns_common *ipcns_get(struct task_struct *task)
{
	struct ipc_namespace *ns = NULL;
	struct nsproxy *nsproxy;

	task_lock(task);
	nsproxy = task->nsproxy;
	if (nsproxy)
		ns = get_ipc_ns(nsproxy->ipc_ns);
	task_unlock(task);

	return ns ? &ns->ns : NULL;
}

static void ipcns_put(struct ns_common *ns)
{
	return put_ipc_ns(to_ipc_ns(ns));
}

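/*
 * ipcns_install - make @new the ipc namespace of the nsset being assembled
 *
 * Requires CAP_SYS_ADMIN both in the user namespace owning the target ipc
 * namespace and in the user namespace of the credentials in @nsset.
 */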
static int ipcns_install(struct nsset *nsset, struct ns_common *new)
{
	struct nsproxy *nsproxy = nsset->nsproxy;
	struct ipc_namespace *ns = to_ipc_ns(new);
	if (!ns_capable(ns->user_ns, CAP_SYS_ADMIN) ||
	    !ns_capable(nsset->cred->user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	put_ipc_ns(nsproxy->ipc_ns);
	nsproxy->ipc_ns = get_ipc_ns(ns);
	return 0;
}

static struct user_namespace *ipcns_owner(struct ns_common *ns)
{
	return to_ipc_ns(ns)->user_ns;
}

const struct proc_ns_operations ipcns_operations = {
	.name = "ipc",
	.type = CLONE_NEWIPC,
	.get = ipcns_get,
	.put = ipcns_put,
	.install = ipcns_install,
	.owner = ipcns_owner,
};