// SPDX-License-Identifier: GPL-2.0-only
/*
 * Landlock - Filesystem management and hooks
 *
 * Copyright © 2016-2020 Mickaël Salaün <[email protected]>
 * Copyright © 2018-2020 ANSSI
 * Copyright © 2021-2025 Microsoft Corporation
 * Copyright © 2022 Günther Noack <[email protected]>
 * Copyright © 2023-2024 Google LLC
 */

#include <asm/ioctls.h>
#include <kunit/test.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/bits.h>
#include <linux/compiler_types.h>
#include <linux/dcache.h>
#include <linux/err.h>
#include <linux/falloc.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/limits.h>
#include <linux/list.h>
#include <linux/lsm_audit.h>
#include <linux/lsm_hooks.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/path.h>
#include <linux/pid.h>
#include <linux/rcupdate.h>
#include <linux/sched/signal.h>
#include <linux/spinlock.h>
#include <linux/stat.h>
#include <linux/types.h>
#include <linux/wait_bit.h>
#include <linux/workqueue.h>
#include <uapi/linux/fiemap.h>
#include <uapi/linux/landlock.h>

#include "access.h"
#include "audit.h"
#include "common.h"
#include "cred.h"
#include "domain.h"
#include "fs.h"
#include "limits.h"
#include "object.h"
#include "ruleset.h"
#include "setup.h"

/* Underlying object management */

static void release_inode(struct landlock_object *const object)
	__releases(object->lock)
{
	struct inode *const inode = object->underobj;
	struct super_block *sb;

	if (!inode) {
		spin_unlock(&object->lock);
		return;
	}

	/*
	 * Protects against concurrent use by hook_sb_delete() of the reference
	 * to the underlying inode.
	 */
	object->underobj = NULL;
	/*
	 * Makes sure that if the filesystem is concurrently unmounted,
	 * hook_sb_delete() will wait for us to finish iput().
	 */
	sb = inode->i_sb;
	atomic_long_inc(&landlock_superblock(sb)->inode_refs);
	spin_unlock(&object->lock);
	/*
	 * Because object->underobj was not NULL, hook_sb_delete() and
	 * get_inode_object() guarantee that it is safe to reset
	 * landlock_inode(inode)->object while it is not NULL. It is therefore
	 * not necessary to lock inode->i_lock.
	 */
	rcu_assign_pointer(landlock_inode(inode)->object, NULL);
	/*
	 * Now, new rules can safely be tied to @inode with get_inode_object().
	 */

	iput(inode);
	if (atomic_long_dec_and_test(&landlock_superblock(sb)->inode_refs))
		wake_up_var(&landlock_superblock(sb)->inode_refs);
}

static const struct landlock_object_underops landlock_fs_underops = {
	.release = release_inode
};

/* IOCTL helpers */

/**
 * is_masked_device_ioctl - Determine whether an IOCTL command is always
 * permitted with Landlock for device files. These commands cannot be
 * restricted on device files by enforcing a Landlock policy.
 *
 * @cmd: The IOCTL command that is supposed to be run.
 *
 * By default, any IOCTL on a device file requires the
 * LANDLOCK_ACCESS_FS_IOCTL_DEV right. However, we blanket-permit some
 * commands, if:
 *
 * 1. The command is implemented in fs/ioctl.c's do_vfs_ioctl(),
 *    not in f_ops->unlocked_ioctl() or f_ops->compat_ioctl().
 *
 * 2. The command is harmless when invoked on devices.
 *
 * We also permit commands that do not make sense for devices, but where the
 * do_vfs_ioctl() implementation returns a more conventional error code.
 *
 * Any new IOCTL commands that are implemented in fs/ioctl.c's do_vfs_ioctl()
 * should be considered for inclusion here.
 *
 * Returns: true if the IOCTL @cmd cannot be restricted with Landlock for
 * device files.
 */
static __attribute_const__ bool is_masked_device_ioctl(const unsigned int cmd)
{
	switch (cmd) {
	/*
	 * FIOCLEX, FIONCLEX, FIONBIO and FIOASYNC manipulate the FD's
	 * close-on-exec and the file's buffered-IO and async flags. These
	 * operations are also available through fcntl(2), and are
	 * unconditionally permitted in Landlock.
	 */
	case FIOCLEX:
	case FIONCLEX:
	case FIONBIO:
	case FIOASYNC:
	/*
	 * FIOQSIZE queries the size of a regular file, directory, or link.
	 *
	 * We still permit it, because it always returns -ENOTTY for
	 * other file types.
	 */
	case FIOQSIZE:
	/*
	 * FIFREEZE and FITHAW freeze and thaw the file system which the
	 * given file belongs to. Requires CAP_SYS_ADMIN.
	 *
	 * These commands operate on the file system's superblock rather
	 * than on the file itself. The same operations can also be
	 * done through any other file or directory on the same file
	 * system, so it is safe to permit these.
	 */
	case FIFREEZE:
	case FITHAW:
	/*
	 * FS_IOC_FIEMAP queries information about the allocation of
	 * blocks within a file.
	 *
	 * This IOCTL command only makes sense for regular files and is
	 * not implemented by devices. It is harmless to permit.
	 */
	case FS_IOC_FIEMAP:
	/*
	 * FIGETBSZ queries the file system's block size for a file or
	 * directory.
	 *
	 * This command operates on the file system's superblock rather
	 * than on the file itself. The same operation can also be done
	 * through any other file or directory on the same file system,
	 * so it is safe to permit it.
	 */
	case FIGETBSZ:
	/*
	 * FICLONE, FICLONERANGE and FIDEDUPERANGE make files share
	 * their underlying storage ("reflink") between source and
	 * destination FDs, on file systems which support that.
	 *
	 * These IOCTL commands only apply to regular files
	 * and are harmless to permit for device files.
	 */
	case FICLONE:
	case FICLONERANGE:
	case FIDEDUPERANGE:
	/*
	 * FS_IOC_GETFSUUID and FS_IOC_GETFSSYSFSPATH both operate on
	 * the file system superblock, not on the specific file, so
	 * these operations are available through any other file on the
	 * same file system as well.
	 */
	case FS_IOC_GETFSUUID:
	case FS_IOC_GETFSSYSFSPATH:
		return true;

	/*
	 * FIONREAD, FS_IOC_GETFLAGS, FS_IOC_SETFLAGS, FS_IOC_FSGETXATTR and
	 * FS_IOC_FSSETXATTR are forwarded to device implementations.
	 */

	/*
	 * file_ioctl() commands (FIBMAP, FS_IOC_RESVSP, FS_IOC_RESVSP64,
	 * FS_IOC_UNRESVSP, FS_IOC_UNRESVSP64 and FS_IOC_ZERO_RANGE) are
	 * forwarded to device implementations, so not permitted.
	 */

	/* Other commands are guarded by the access right. */
	default:
		return false;
	}
}
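
/*
 * For example, under a domain that handles LANDLOCK_ACCESS_FS_IOCTL_DEV
 * without granting it, a sandboxed process can still run the masked commands
 * above, while device-specific commands are denied. A hedged sketch, assuming
 * fd refers to an opened terminal device:
 *
 *	ioctl(fd, FIOCLEX);		// permitted: masked command
 *	ioctl(fd, TIOCGWINSZ, &ws);	// denied with EACCES: device command
 */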

/*
 * is_masked_device_ioctl_compat - same as the helper above, but checking the
 * "compat" IOCTL commands.
 *
 * The IOCTL commands with special handling in compat-mode should behave the
 * same as their non-compat counterparts.
 */
static __attribute_const__ bool
is_masked_device_ioctl_compat(const unsigned int cmd)
{
	switch (cmd) {
	/* FICLONE is permitted, same as in the non-compat variant. */
	case FICLONE:
		return true;

#if defined(CONFIG_X86_64)
	/*
	 * FS_IOC_RESVSP_32, FS_IOC_RESVSP64_32, FS_IOC_UNRESVSP_32,
	 * FS_IOC_UNRESVSP64_32, FS_IOC_ZERO_RANGE_32: not blanket-permitted,
	 * for consistency with their non-compat variants.
	 */
	case FS_IOC_RESVSP_32:
	case FS_IOC_RESVSP64_32:
	case FS_IOC_UNRESVSP_32:
	case FS_IOC_UNRESVSP64_32:
	case FS_IOC_ZERO_RANGE_32:
#endif

	/*
	 * FS_IOC32_GETFLAGS, FS_IOC32_SETFLAGS are forwarded to their device
	 * implementations.
	 */
	case FS_IOC32_GETFLAGS:
	case FS_IOC32_SETFLAGS:
		return false;
	default:
		return is_masked_device_ioctl(cmd);
	}
}

/* Ruleset management */

static struct landlock_object *get_inode_object(struct inode *const inode)
{
	struct landlock_object *object, *new_object;
	struct landlock_inode_security *inode_sec = landlock_inode(inode);

	rcu_read_lock();
retry:
	object = rcu_dereference(inode_sec->object);
	if (object) {
		if (likely(refcount_inc_not_zero(&object->usage))) {
			rcu_read_unlock();
			return object;
		}
		/*
		 * We are racing with release_inode(), the object is going
		 * away. Wait for release_inode(), then retry.
		 */
		spin_lock(&object->lock);
		spin_unlock(&object->lock);
		goto retry;
	}
	rcu_read_unlock();

	/*
	 * If there is no object tied to @inode, then create a new one (without
	 * holding any locks).
	 */
	new_object = landlock_create_object(&landlock_fs_underops, inode);
	if (IS_ERR(new_object))
		return new_object;

	/*
	 * Protects against concurrent calls to get_inode_object() or
	 * hook_sb_delete().
	 */
	spin_lock(&inode->i_lock);
	if (unlikely(rcu_access_pointer(inode_sec->object))) {
		/* Someone else just created the object, bail out and retry. */
		spin_unlock(&inode->i_lock);
		kfree(new_object);

		rcu_read_lock();
		goto retry;
	}

	/*
	 * @inode will be released by hook_sb_delete() on its superblock
	 * shutdown, or by release_inode() when no more ruleset references the
	 * related object.
	 */
	ihold(inode);
	rcu_assign_pointer(inode_sec->object, new_object);
	spin_unlock(&inode->i_lock);
	return new_object;
}
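
/*
 * A hedged sketch of the race that the lock/unlock pair above resolves,
 * with two hypothetical tasks T1 and T2 acting on the same inode:
 *
 *	T1: drops the last ruleset reference; release_inode() runs with
 *	    object->lock held
 *	T2: refcount_inc_not_zero() fails, then blocks on object->lock
 *	T1: clears landlock_inode(inode)->object and unlocks
 *	T2: acquires and releases object->lock, retries, now reads a NULL
 *	    object pointer, and creates a fresh object
 */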

/* All access rights that can be tied to files. */
/* clang-format off */
#define ACCESS_FILE ( \
	LANDLOCK_ACCESS_FS_EXECUTE | \
	LANDLOCK_ACCESS_FS_WRITE_FILE | \
	LANDLOCK_ACCESS_FS_READ_FILE | \
	LANDLOCK_ACCESS_FS_TRUNCATE | \
	LANDLOCK_ACCESS_FS_IOCTL_DEV)
/* clang-format on */

/*
 * @path: Should have been checked by get_path_from_fd().
 */
int landlock_append_fs_rule(struct landlock_ruleset *const ruleset,
			    const struct path *const path,
			    access_mask_t access_rights)
{
	int err;
	struct landlock_id id = {
		.type = LANDLOCK_KEY_INODE,
	};

	/* Files only get access rights that make sense. */
	if (!d_is_dir(path->dentry) &&
	    (access_rights | ACCESS_FILE) != ACCESS_FILE)
		return -EINVAL;
	if (WARN_ON_ONCE(ruleset->num_layers != 1))
		return -EINVAL;

	/* Transforms relative access rights to absolute ones. */
	access_rights |= LANDLOCK_MASK_ACCESS_FS &
			 ~landlock_get_fs_access_mask(ruleset, 0);
	id.key.object = get_inode_object(d_backing_inode(path->dentry));
	if (IS_ERR(id.key.object))
		return PTR_ERR(id.key.object);
	mutex_lock(&ruleset->lock);
	err = landlock_insert_rule(ruleset, id, access_rights);
	mutex_unlock(&ruleset->lock);
	/*
	 * No need to check for an error because landlock_insert_rule()
	 * increments the refcount for the new object if needed.
	 */
	landlock_put_object(id.key.object);
	return err;
}
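
/*
 * This function backs the LANDLOCK_RULE_PATH_BENEATH case of
 * landlock_add_rule(2). A minimal user-space sketch reaching it, with error
 * handling omitted and "/usr" plus ruleset_fd only as assumed examples:
 *
 *	struct landlock_path_beneath_attr path_beneath = {
 *		.allowed_access = LANDLOCK_ACCESS_FS_READ_FILE |
 *				  LANDLOCK_ACCESS_FS_READ_DIR,
 *		.parent_fd = open("/usr", O_PATH | O_CLOEXEC),
 *	};
 *	syscall(SYS_landlock_add_rule, ruleset_fd,
 *		LANDLOCK_RULE_PATH_BENEATH, &path_beneath, 0);
 *
 * Passing a directory-only right such as LANDLOCK_ACCESS_FS_READ_DIR with a
 * non-directory parent_fd would fail the d_is_dir() check above with -EINVAL.
 */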

/* Access-control management */

/*
 * The lifetime of the returned rule is tied to @domain.
 *
 * Returns NULL if no rule is found or if @dentry is negative.
 */
static const struct landlock_rule *
find_rule(const struct landlock_ruleset *const domain,
	  const struct dentry *const dentry)
{
	const struct landlock_rule *rule;
	const struct inode *inode;
	struct landlock_id id = {
		.type = LANDLOCK_KEY_INODE,
	};

	/* Ignores nonexistent leafs. */
	if (d_is_negative(dentry))
		return NULL;

	inode = d_backing_inode(dentry);
	rcu_read_lock();
	id.key.object = rcu_dereference(landlock_inode(inode)->object);
	rule = landlock_find_rule(domain, id);
	rcu_read_unlock();
	return rule;
}

/*
 * Allows access to pseudo filesystems that will never be mountable (e.g.
 * sockfs, pipefs), but can still be reachable through
 * /proc/<pid>/fd/<file-descriptor>.
 */
static bool is_nouser_or_private(const struct dentry *dentry)
{
	return (dentry->d_sb->s_flags & SB_NOUSER) ||
	       (d_is_positive(dentry) &&
		unlikely(IS_PRIVATE(d_backing_inode(dentry))));
}

static const struct access_masks any_fs = {
	.fs = ~0,
};

/*
 * Check that a destination file hierarchy has more restrictions than a source
 * file hierarchy. This is only used for link and rename actions.
 *
 * @layer_masks_child2: Optional child masks.
 */
static bool no_more_access(
	const layer_mask_t (*const layer_masks_parent1)[LANDLOCK_NUM_ACCESS_FS],
	const layer_mask_t (*const layer_masks_child1)[LANDLOCK_NUM_ACCESS_FS],
	const bool child1_is_directory,
	const layer_mask_t (*const layer_masks_parent2)[LANDLOCK_NUM_ACCESS_FS],
	const layer_mask_t (*const layer_masks_child2)[LANDLOCK_NUM_ACCESS_FS],
	const bool child2_is_directory)
{
	unsigned long access_bit;

	for (access_bit = 0; access_bit < ARRAY_SIZE(*layer_masks_parent2);
	     access_bit++) {
		/* Ignores accesses that only make sense for directories. */
		const bool is_file_access =
			!!(BIT_ULL(access_bit) & ACCESS_FILE);

		if (child1_is_directory || is_file_access) {
			/*
			 * Checks if the destination restrictions are a
			 * superset of the source ones (i.e. inherited access
			 * rights without child exceptions):
			 * restrictions(parent2) >= restrictions(child1)
			 */
			if ((((*layer_masks_parent1)[access_bit] &
			      (*layer_masks_child1)[access_bit]) |
			     (*layer_masks_parent2)[access_bit]) !=
			    (*layer_masks_parent2)[access_bit])
				return false;
		}

		if (!layer_masks_child2)
			continue;
		if (child2_is_directory || is_file_access) {
			/*
			 * Checks inverted restrictions for RENAME_EXCHANGE:
			 * restrictions(parent1) >= restrictions(child2)
			 */
			if ((((*layer_masks_parent2)[access_bit] &
			      (*layer_masks_child2)[access_bit]) |
			     (*layer_masks_parent1)[access_bit]) !=
			    (*layer_masks_parent1)[access_bit])
				return false;
		}
	}
	return true;
}
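
/*
 * A worked instance of the superset test above for one access bit, with
 * hypothetical masks: if parent1 & child1 = 0b001 (layer 0 still denies the
 * source) and parent2 = 0b011 (layers 0 and 1 deny the destination), then
 * (0b001 | 0b011) == 0b011, so the destination is at least as restricted and
 * the check passes. With parent2 = 0b010 instead, (0b001 | 0b010) = 0b011 !=
 * 0b010, so no_more_access() returns false.
 */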

#define NMA_TRUE(...) KUNIT_EXPECT_TRUE(test, no_more_access(__VA_ARGS__))
#define NMA_FALSE(...) KUNIT_EXPECT_FALSE(test, no_more_access(__VA_ARGS__))

#ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST

static void test_no_more_access(struct kunit *const test)
{
	const layer_mask_t rx0[LANDLOCK_NUM_ACCESS_FS] = {
		[BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)] = BIT_ULL(0),
		[BIT_INDEX(LANDLOCK_ACCESS_FS_READ_FILE)] = BIT_ULL(0),
	};
	const layer_mask_t mx0[LANDLOCK_NUM_ACCESS_FS] = {
		[BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)] = BIT_ULL(0),
		[BIT_INDEX(LANDLOCK_ACCESS_FS_MAKE_REG)] = BIT_ULL(0),
	};
	const layer_mask_t x0[LANDLOCK_NUM_ACCESS_FS] = {
		[BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)] = BIT_ULL(0),
	};
	const layer_mask_t x1[LANDLOCK_NUM_ACCESS_FS] = {
		[BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)] = BIT_ULL(1),
	};
	const layer_mask_t x01[LANDLOCK_NUM_ACCESS_FS] = {
		[BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)] = BIT_ULL(0) |
							   BIT_ULL(1),
	};
	const layer_mask_t allows_all[LANDLOCK_NUM_ACCESS_FS] = {};

	/* Checks without restriction. */
	NMA_TRUE(&x0, &allows_all, false, &allows_all, NULL, false);
	NMA_TRUE(&allows_all, &x0, false, &allows_all, NULL, false);
	NMA_FALSE(&x0, &x0, false, &allows_all, NULL, false);

	/*
	 * Checks that we can only refer a file if no more access could be
	 * inherited.
	 */
	NMA_TRUE(&x0, &x0, false, &rx0, NULL, false);
	NMA_TRUE(&rx0, &rx0, false, &rx0, NULL, false);
	NMA_FALSE(&rx0, &rx0, false, &x0, NULL, false);
	NMA_FALSE(&rx0, &rx0, false, &x1, NULL, false);

	/* Checks allowed referring with different nested domains. */
	NMA_TRUE(&x0, &x1, false, &x0, NULL, false);
	NMA_TRUE(&x1, &x0, false, &x0, NULL, false);
	NMA_TRUE(&x0, &x01, false, &x0, NULL, false);
	NMA_TRUE(&x0, &x01, false, &rx0, NULL, false);
	NMA_TRUE(&x01, &x0, false, &x0, NULL, false);
	NMA_TRUE(&x01, &x0, false, &rx0, NULL, false);
	NMA_FALSE(&x01, &x01, false, &x0, NULL, false);

	/* Checks that file access rights are also enforced for a directory. */
	NMA_FALSE(&rx0, &rx0, true, &x0, NULL, false);

	/* Checks that directory access rights don't impact file referring... */
	NMA_TRUE(&mx0, &mx0, false, &x0, NULL, false);
	/* ...but only directory referring. */
	NMA_FALSE(&mx0, &mx0, true, &x0, NULL, false);

	/* Checks directory exchange. */
	NMA_TRUE(&mx0, &mx0, true, &mx0, &mx0, true);
	NMA_TRUE(&mx0, &mx0, true, &mx0, &x0, true);
	NMA_FALSE(&mx0, &mx0, true, &x0, &mx0, true);
	NMA_FALSE(&mx0, &mx0, true, &x0, &x0, true);
	NMA_FALSE(&mx0, &mx0, true, &x1, &x1, true);

	/* Checks file exchange with directory access rights... */
	NMA_TRUE(&mx0, &mx0, false, &mx0, &mx0, false);
	NMA_TRUE(&mx0, &mx0, false, &mx0, &x0, false);
	NMA_TRUE(&mx0, &mx0, false, &x0, &mx0, false);
	NMA_TRUE(&mx0, &mx0, false, &x0, &x0, false);
	/* ...and with file access rights. */
	NMA_TRUE(&rx0, &rx0, false, &rx0, &rx0, false);
	NMA_TRUE(&rx0, &rx0, false, &rx0, &x0, false);
	NMA_FALSE(&rx0, &rx0, false, &x0, &rx0, false);
	NMA_FALSE(&rx0, &rx0, false, &x0, &x0, false);
	NMA_FALSE(&rx0, &rx0, false, &x1, &x1, false);

	/*
	 * Allowing the following requests should not be a security risk
	 * because domain 0 denies execute access, and domain 1 is always
	 * nested with domain 0. However, adding an exception for this case
	 * would mean to check all nested domains to make sure none can get
	 * more privileges (e.g. processes only sandboxed by domain 0).
	 * Moreover, this behavior (i.e. composition of N domains) could then
	 * be inconsistent compared to domain 1's ruleset alone (e.g. it might
	 * be denied to link/rename with domain 1's ruleset, whereas it would
	 * be allowed if nested on top of domain 0). Another drawback would be
	 * to create a covert channel that could enable sandboxed processes to
	 * infer most of the filesystem restrictions from their domain. To
	 * make it simple, efficient, safe, and more consistent, this case is
	 * always denied.
	 */
	NMA_FALSE(&x1, &x1, false, &x0, NULL, false);
	NMA_FALSE(&x1, &x1, false, &rx0, NULL, false);
	NMA_FALSE(&x1, &x1, true, &x0, NULL, false);
	NMA_FALSE(&x1, &x1, true, &rx0, NULL, false);

	/* Checks the same case of exclusive domains with a file... */
	NMA_TRUE(&x1, &x1, false, &x01, NULL, false);
	NMA_FALSE(&x1, &x1, false, &x01, &x0, false);
	NMA_FALSE(&x1, &x1, false, &x01, &x01, false);
	NMA_FALSE(&x1, &x1, false, &x0, &x0, false);
	/* ...and with a directory. */
	NMA_FALSE(&x1, &x1, false, &x0, &x0, true);
	NMA_FALSE(&x1, &x1, true, &x0, &x0, false);
	NMA_FALSE(&x1, &x1, true, &x0, &x0, true);
}

#endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */

#undef NMA_TRUE
#undef NMA_FALSE

static bool is_layer_masks_allowed(
	layer_mask_t (*const layer_masks)[LANDLOCK_NUM_ACCESS_FS])
{
	return !memchr_inv(layer_masks, 0, sizeof(*layer_masks));
}

/*
 * Removes @layer_masks accesses that are not requested.
 *
 * Returns true if the request is allowed, false otherwise.
 */
static bool
scope_to_request(const access_mask_t access_request,
		 layer_mask_t (*const layer_masks)[LANDLOCK_NUM_ACCESS_FS])
{
	const unsigned long access_req = access_request;
	unsigned long access_bit;

	if (WARN_ON_ONCE(!layer_masks))
		return true;

	for_each_clear_bit(access_bit, &access_req, ARRAY_SIZE(*layer_masks))
		(*layer_masks)[access_bit] = 0;

	return is_layer_masks_allowed(layer_masks);
}

#ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST

static void test_scope_to_request_with_exec_none(struct kunit *const test)
{
	/* Allows everything. */
	layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {};

	/* Checks and scopes with execute. */
	KUNIT_EXPECT_TRUE(test, scope_to_request(LANDLOCK_ACCESS_FS_EXECUTE,
						 &layer_masks));
	KUNIT_EXPECT_EQ(test, 0,
			layer_masks[BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)]);
	KUNIT_EXPECT_EQ(test, 0,
			layer_masks[BIT_INDEX(LANDLOCK_ACCESS_FS_WRITE_FILE)]);
}

static void test_scope_to_request_with_exec_some(struct kunit *const test)
{
	/* Denies execute and write. */
	layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {
		[BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)] = BIT_ULL(0),
		[BIT_INDEX(LANDLOCK_ACCESS_FS_WRITE_FILE)] = BIT_ULL(1),
	};

	/* Checks and scopes with execute. */
	KUNIT_EXPECT_FALSE(test, scope_to_request(LANDLOCK_ACCESS_FS_EXECUTE,
						  &layer_masks));
	KUNIT_EXPECT_EQ(test, BIT_ULL(0),
			layer_masks[BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)]);
	KUNIT_EXPECT_EQ(test, 0,
			layer_masks[BIT_INDEX(LANDLOCK_ACCESS_FS_WRITE_FILE)]);
}

static void test_scope_to_request_without_access(struct kunit *const test)
{
	/* Denies execute and write. */
	layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {
		[BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)] = BIT_ULL(0),
		[BIT_INDEX(LANDLOCK_ACCESS_FS_WRITE_FILE)] = BIT_ULL(1),
	};

	/* Checks and scopes without access request. */
	KUNIT_EXPECT_TRUE(test, scope_to_request(0, &layer_masks));
	KUNIT_EXPECT_EQ(test, 0,
			layer_masks[BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)]);
	KUNIT_EXPECT_EQ(test, 0,
			layer_masks[BIT_INDEX(LANDLOCK_ACCESS_FS_WRITE_FILE)]);
}

#endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */

/*
 * Returns true if there is at least one access right different than
 * LANDLOCK_ACCESS_FS_REFER.
 */
static bool
is_eacces(const layer_mask_t (*const layer_masks)[LANDLOCK_NUM_ACCESS_FS],
	  const access_mask_t access_request)
{
	unsigned long access_bit;
	/* LANDLOCK_ACCESS_FS_REFER alone must return -EXDEV. */
	const unsigned long access_check = access_request &
					   ~LANDLOCK_ACCESS_FS_REFER;

	if (!layer_masks)
		return false;

	for_each_set_bit(access_bit, &access_check, ARRAY_SIZE(*layer_masks)) {
		if ((*layer_masks)[access_bit])
			return true;
	}
	return false;
}

#define IE_TRUE(...) KUNIT_EXPECT_TRUE(test, is_eacces(__VA_ARGS__))
#define IE_FALSE(...) KUNIT_EXPECT_FALSE(test, is_eacces(__VA_ARGS__))

#ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST

static void test_is_eacces_with_none(struct kunit *const test)
{
	const layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {};

	IE_FALSE(&layer_masks, 0);
	IE_FALSE(&layer_masks, LANDLOCK_ACCESS_FS_REFER);
	IE_FALSE(&layer_masks, LANDLOCK_ACCESS_FS_EXECUTE);
	IE_FALSE(&layer_masks, LANDLOCK_ACCESS_FS_WRITE_FILE);
}

static void test_is_eacces_with_refer(struct kunit *const test)
{
	const layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {
		[BIT_INDEX(LANDLOCK_ACCESS_FS_REFER)] = BIT_ULL(0),
	};

	IE_FALSE(&layer_masks, 0);
	IE_FALSE(&layer_masks, LANDLOCK_ACCESS_FS_REFER);
	IE_FALSE(&layer_masks, LANDLOCK_ACCESS_FS_EXECUTE);
	IE_FALSE(&layer_masks, LANDLOCK_ACCESS_FS_WRITE_FILE);
}

static void test_is_eacces_with_write(struct kunit *const test)
{
	const layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {
		[BIT_INDEX(LANDLOCK_ACCESS_FS_WRITE_FILE)] = BIT_ULL(0),
	};

	IE_FALSE(&layer_masks, 0);
	IE_FALSE(&layer_masks, LANDLOCK_ACCESS_FS_REFER);
	IE_FALSE(&layer_masks, LANDLOCK_ACCESS_FS_EXECUTE);

	IE_TRUE(&layer_masks, LANDLOCK_ACCESS_FS_WRITE_FILE);
}

#endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */

#undef IE_TRUE
#undef IE_FALSE

/**
 * is_access_to_paths_allowed - Check accesses for requests with a common path
 *
 * @domain: Domain to check against.
 * @path: File hierarchy to walk through. For refer checks, this would be
 * the common mountpoint.
 * @access_request_parent1: Accesses to check, once @layer_masks_parent1 is
 * equal to @layer_masks_parent2 (if any). This is tied to the unique
 * requested path for most actions, or the source in case of a refer action
 * (i.e. rename or link), or the source and destination in case of
 * RENAME_EXCHANGE.
 * @layer_masks_parent1: Pointer to a matrix of layer masks per access
 * masks, identifying the layers that forbid a specific access. Bits from
 * this matrix can be unset according to the @path walk. An empty matrix
 * means that @domain allows all possible Landlock accesses (i.e. not only
 * those identified by @access_request_parent1). This matrix can
 * initially refer to domain layer masks and, when the accesses for the
 * destination and source are the same, to requested layer masks.
 * @log_request_parent1: Audit request to fill if the related access is denied.
 * @dentry_child1: Dentry to the initial child of the parent1 path. This
 * pointer must be NULL for non-refer actions (i.e. not link nor rename).
 * @access_request_parent2: Similar to @access_request_parent1 but for a
 * request involving a source and a destination. This refers to the
 * destination, except in case of RENAME_EXCHANGE where it also refers to
 * the source. Must be set to 0 when using a simple path request.
 * @layer_masks_parent2: Similar to @layer_masks_parent1 but for a refer
 * action. This must be NULL otherwise.
 * @log_request_parent2: Audit request to fill if the related access is denied.
 * @dentry_child2: Dentry to the initial child of the parent2 path. This
 * pointer is only set for RENAME_EXCHANGE actions and must be NULL
 * otherwise.
 *
 * This helper first checks that the destination has a superset of restrictions
 * compared to the source (if any) for a common path. Because of
 * RENAME_EXCHANGE actions, source and destinations may be swapped. It then
 * checks that the collected accesses and the remaining ones are enough to
 * allow the request.
 *
 * Returns:
 * - true if the access request is granted;
 * - false otherwise.
 */
static bool is_access_to_paths_allowed(
	const struct landlock_ruleset *const domain,
	const struct path *const path,
	const access_mask_t access_request_parent1,
	layer_mask_t (*const layer_masks_parent1)[LANDLOCK_NUM_ACCESS_FS],
	struct landlock_request *const log_request_parent1,
	struct dentry *const dentry_child1,
	const access_mask_t access_request_parent2,
	layer_mask_t (*const layer_masks_parent2)[LANDLOCK_NUM_ACCESS_FS],
	struct landlock_request *const log_request_parent2,
	struct dentry *const dentry_child2)
{
	bool allowed_parent1 = false, allowed_parent2 = false, is_dom_check,
	     child1_is_directory = true, child2_is_directory = true;
	struct path walker_path;
	access_mask_t access_masked_parent1, access_masked_parent2;
	layer_mask_t _layer_masks_child1[LANDLOCK_NUM_ACCESS_FS],
		_layer_masks_child2[LANDLOCK_NUM_ACCESS_FS];
	layer_mask_t (*layer_masks_child1)[LANDLOCK_NUM_ACCESS_FS] = NULL,
		(*layer_masks_child2)[LANDLOCK_NUM_ACCESS_FS] = NULL;

	if (!access_request_parent1 && !access_request_parent2)
		return true;

	if (WARN_ON_ONCE(!path))
		return true;

	if (is_nouser_or_private(path->dentry))
		return true;

	if (WARN_ON_ONCE(!layer_masks_parent1))
		return false;

	allowed_parent1 = is_layer_masks_allowed(layer_masks_parent1);

	if (unlikely(layer_masks_parent2)) {
		if (WARN_ON_ONCE(!dentry_child1))
			return false;

		allowed_parent2 = is_layer_masks_allowed(layer_masks_parent2);

		/*
		 * For a double request, first check for potential privilege
		 * escalation by looking at domain handled accesses (which are
		 * a superset of the meaningful requested accesses).
		 */
		access_masked_parent1 = access_masked_parent2 =
			landlock_union_access_masks(domain).fs;
		is_dom_check = true;
	} else {
		if (WARN_ON_ONCE(dentry_child1 || dentry_child2))
			return false;
		/* For a simple request, only check for requested accesses. */
		access_masked_parent1 = access_request_parent1;
		access_masked_parent2 = access_request_parent2;
		is_dom_check = false;
	}

	if (unlikely(dentry_child1)) {
		landlock_unmask_layers(
			find_rule(domain, dentry_child1),
			landlock_init_layer_masks(
				domain, LANDLOCK_MASK_ACCESS_FS,
				&_layer_masks_child1, LANDLOCK_KEY_INODE),
			&_layer_masks_child1, ARRAY_SIZE(_layer_masks_child1));
		layer_masks_child1 = &_layer_masks_child1;
		child1_is_directory = d_is_dir(dentry_child1);
	}
	if (unlikely(dentry_child2)) {
		landlock_unmask_layers(
			find_rule(domain, dentry_child2),
			landlock_init_layer_masks(
				domain, LANDLOCK_MASK_ACCESS_FS,
				&_layer_masks_child2, LANDLOCK_KEY_INODE),
			&_layer_masks_child2, ARRAY_SIZE(_layer_masks_child2));
		layer_masks_child2 = &_layer_masks_child2;
		child2_is_directory = d_is_dir(dentry_child2);
	}

	walker_path = *path;
	path_get(&walker_path);
	/*
	 * We need to walk through all the hierarchy to not miss any relevant
	 * restriction.
	 */
	while (true) {
		const struct landlock_rule *rule;

		/*
		 * If all accesses allowed on the destination are already
		 * allowed on the source, i.e. if the destination carries at
		 * least as many restrictions as the source, then we can
		 * safely refer files from the source to the destination
		 * without risking a privilege escalation. This also applies
		 * in the case of RENAME_EXCHANGE, which implies checks in
		 * both directions. This is crucial for standalone
		 * multilayered security policies. Furthermore, it helps
		 * prevent policy writers from shooting themselves in the
		 * foot.
		 */
		if (unlikely(is_dom_check &&
			     no_more_access(
				     layer_masks_parent1, layer_masks_child1,
				     child1_is_directory, layer_masks_parent2,
				     layer_masks_child2,
				     child2_is_directory))) {
			/*
			 * Now, downgrades the remaining checks from domain
			 * handled accesses to requested accesses.
			 */
			is_dom_check = false;
			access_masked_parent1 = access_request_parent1;
			access_masked_parent2 = access_request_parent2;

			allowed_parent1 =
				allowed_parent1 ||
				scope_to_request(access_masked_parent1,
						 layer_masks_parent1);
			allowed_parent2 =
				allowed_parent2 ||
				scope_to_request(access_masked_parent2,
						 layer_masks_parent2);

			/* Stops when all accesses are granted. */
			if (allowed_parent1 && allowed_parent2)
				break;
		}

		rule = find_rule(domain, walker_path.dentry);
		allowed_parent1 = allowed_parent1 ||
				  landlock_unmask_layers(
					  rule, access_masked_parent1,
					  layer_masks_parent1,
					  ARRAY_SIZE(*layer_masks_parent1));
		allowed_parent2 = allowed_parent2 ||
				  landlock_unmask_layers(
					  rule, access_masked_parent2,
					  layer_masks_parent2,
					  ARRAY_SIZE(*layer_masks_parent2));

		/* Stops when a rule from each layer grants access. */
		if (allowed_parent1 && allowed_parent2)
			break;

jump_up:
		if (walker_path.dentry == walker_path.mnt->mnt_root) {
			if (follow_up(&walker_path)) {
				/* Ignores hidden mount points. */
				goto jump_up;
			} else {
				/*
				 * Stops at the real root. Denies access
				 * because not all layers have granted access.
				 */
				break;
			}
		}

		if (unlikely(IS_ROOT(walker_path.dentry))) {
			if (likely(walker_path.mnt->mnt_flags & MNT_INTERNAL)) {
				/*
				 * Stops and allows access when reaching
				 * disconnected root directories that are part
				 * of internal filesystems (e.g. nsfs, which is
				 * reachable through /proc/<pid>/ns/<namespace>).
				 */
				allowed_parent1 = true;
				allowed_parent2 = true;
				break;
			}

			/*
			 * We reached a disconnected root directory from a bind
			 * mount. Let's continue the walk with the mount point
			 * we missed.
			 */
			dput(walker_path.dentry);
			walker_path.dentry = walker_path.mnt->mnt_root;
			dget(walker_path.dentry);
		} else {
			struct dentry *const parent_dentry =
				dget_parent(walker_path.dentry);

			dput(walker_path.dentry);
			walker_path.dentry = parent_dentry;
		}
	}
	path_put(&walker_path);

	/*
	 * Check CONFIG_AUDIT to enable elision of log_request_parent* and
	 * associated caller's stack variables thanks to dead code elimination.
	 */
#ifdef CONFIG_AUDIT
	if (!allowed_parent1 && log_request_parent1) {
		log_request_parent1->type = LANDLOCK_REQUEST_FS_ACCESS;
		log_request_parent1->audit.type = LSM_AUDIT_DATA_PATH;
		log_request_parent1->audit.u.path = *path;
		log_request_parent1->access = access_masked_parent1;
		log_request_parent1->layer_masks = layer_masks_parent1;
		log_request_parent1->layer_masks_size =
			ARRAY_SIZE(*layer_masks_parent1);
	}

	if (!allowed_parent2 && log_request_parent2) {
		log_request_parent2->type = LANDLOCK_REQUEST_FS_ACCESS;
		log_request_parent2->audit.type = LSM_AUDIT_DATA_PATH;
		log_request_parent2->audit.u.path = *path;
		log_request_parent2->access = access_masked_parent2;
		log_request_parent2->layer_masks = layer_masks_parent2;
		log_request_parent2->layer_masks_size =
			ARRAY_SIZE(*layer_masks_parent2);
	}
#endif /* CONFIG_AUDIT */

	return allowed_parent1 && allowed_parent2;
}
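
/*
 * An illustration of the walk above, assuming a hypothetical bind mount of
 * /usr/lib onto /mnt/lib and a check on /mnt/lib/libc.so: the walker visits
 * libc.so, then the mount root lib, then follows up to the /mnt/lib mount
 * point in the parent mount, and continues through /mnt up to /, unmasking
 * layer bits at each rule it finds until every layer has granted the
 * requested accesses or the real root ends the walk.
 */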

static int current_check_access_path(const struct path *const path,
				     access_mask_t access_request)
{
	const struct access_masks masks = {
		.fs = access_request,
	};
	const struct landlock_cred_security *const subject =
		landlock_get_applicable_subject(current_cred(), masks, NULL);
	layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {};
	struct landlock_request request = {};

	if (!subject)
		return 0;

	access_request = landlock_init_layer_masks(subject->domain,
						    access_request, &layer_masks,
						    LANDLOCK_KEY_INODE);
	if (is_access_to_paths_allowed(subject->domain, path, access_request,
				       &layer_masks, &request, NULL, 0, NULL,
				       NULL, NULL))
		return 0;

	landlock_log_denial(subject, &request);
	return -EACCES;
}

static __attribute_const__ access_mask_t get_mode_access(const umode_t mode)
{
	switch (mode & S_IFMT) {
	case S_IFLNK:
		return LANDLOCK_ACCESS_FS_MAKE_SYM;
	case S_IFDIR:
		return LANDLOCK_ACCESS_FS_MAKE_DIR;
	case S_IFCHR:
		return LANDLOCK_ACCESS_FS_MAKE_CHAR;
	case S_IFBLK:
		return LANDLOCK_ACCESS_FS_MAKE_BLOCK;
	case S_IFIFO:
		return LANDLOCK_ACCESS_FS_MAKE_FIFO;
	case S_IFSOCK:
		return LANDLOCK_ACCESS_FS_MAKE_SOCK;
	case S_IFREG:
	case 0:
		/* A zero mode translates to S_IFREG. */
	default:
		/* Treats weird files as regular files. */
		return LANDLOCK_ACCESS_FS_MAKE_REG;
	}
}
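
/*
 * For example, mknod("fifo", S_IFIFO | 0600, 0) reaches hook_path_mknod()
 * below with (mode & S_IFMT) == S_IFIFO, so the sandboxed caller needs
 * LANDLOCK_ACCESS_FS_MAKE_FIFO on the parent directory, while a regular file
 * created with open(2) and O_CREAT maps to LANDLOCK_ACCESS_FS_MAKE_REG.
 */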

static access_mask_t maybe_remove(const struct dentry *const dentry)
{
	if (d_is_negative(dentry))
		return 0;
	return d_is_dir(dentry) ? LANDLOCK_ACCESS_FS_REMOVE_DIR :
				  LANDLOCK_ACCESS_FS_REMOVE_FILE;
}
1029
/**
1030
* collect_domain_accesses - Walk through a file path and collect accesses
1031
*
1032
* @domain: Domain to check against.
1033
* @mnt_root: Last directory to check.
1034
* @dir: Directory to start the walk from.
1035
* @layer_masks_dom: Where to store the collected accesses.
1036
*
1037
* This helper is useful to begin a path walk from the @dir directory to a
1038
* @mnt_root directory used as a mount point. This mount point is the common
1039
* ancestor between the source and the destination of a renamed and linked
1040
* file. While walking from @dir to @mnt_root, we record all the domain's
1041
* allowed accesses in @layer_masks_dom.
1042
*
1043
* Because of disconnected directories, this walk may not reach @mnt_dir. In
1044
* this case, the walk will continue to @mnt_dir after this call.
1045
*
1046
* This is similar to is_access_to_paths_allowed() but much simpler because it
1047
* only handles walking on the same mount point and only checks one set of
1048
* accesses.
1049
*
1050
* Returns:
1051
* - true if all the domain access rights are allowed for @dir;
1052
* - false if the walk reached @mnt_root.
1053
*/
1054
static bool collect_domain_accesses(
1055
const struct landlock_ruleset *const domain,
1056
const struct dentry *const mnt_root, struct dentry *dir,
1057
layer_mask_t (*const layer_masks_dom)[LANDLOCK_NUM_ACCESS_FS])
1058
{
1059
unsigned long access_dom;
1060
bool ret = false;
1061
1062
if (WARN_ON_ONCE(!domain || !mnt_root || !dir || !layer_masks_dom))
1063
return true;
1064
if (is_nouser_or_private(dir))
1065
return true;
1066
1067
access_dom = landlock_init_layer_masks(domain, LANDLOCK_MASK_ACCESS_FS,
1068
layer_masks_dom,
1069
LANDLOCK_KEY_INODE);
1070
1071
dget(dir);
1072
while (true) {
1073
struct dentry *parent_dentry;
1074
1075
/* Gets all layers allowing all domain accesses. */
1076
if (landlock_unmask_layers(find_rule(domain, dir), access_dom,
1077
layer_masks_dom,
1078
ARRAY_SIZE(*layer_masks_dom))) {
1079
/*
1080
* Stops when all handled accesses are allowed by at
1081
* least one rule in each layer.
1082
*/
1083
ret = true;
1084
break;
1085
}
1086
1087
/*
1088
* Stops at the mount point or the filesystem root for a disconnected
1089
* directory.
1090
*/
1091
if (dir == mnt_root || unlikely(IS_ROOT(dir)))
1092
break;
1093
1094
parent_dentry = dget_parent(dir);
1095
dput(dir);
1096
dir = parent_dentry;
1097
}
1098
dput(dir);
1099
return ret;
1100
}

/**
 * current_check_refer_path - Check if a rename or link action is allowed
 *
 * @old_dentry: File or directory requested to be moved or linked.
 * @new_dir: Destination parent directory.
 * @new_dentry: Destination file or directory.
 * @removable: Set to true for a rename operation.
 * @exchange: Set to true for a rename operation with RENAME_EXCHANGE.
 *
 * Because of its unprivileged constraints, Landlock relies on file hierarchies
 * (and not only inodes) to tie access rights to files. Being able to link or
 * rename a file hierarchy brings some challenges. Indeed, moving or linking a
 * file (i.e. creating a new reference to an inode) can have an impact on the
 * actions allowed for a set of files if it would change its parent directory
 * (i.e. reparenting).
 *
 * To avoid trivial access right bypasses, Landlock first checks if the file or
 * directory requested to be moved would gain new access rights inherited from
 * its new hierarchy. Before returning any error, Landlock then checks that
 * the parent source hierarchy and the destination hierarchy would allow the
 * link or rename action. If it is not the case, an error with EACCES is
 * returned to inform user space that there is no way to remove or create the
 * requested source file type. If it should be allowed but the new inherited
 * access rights would be greater than the source access rights, then the
 * kernel returns an error with EXDEV. Prioritizing EACCES over EXDEV enables
 * user space to abort the whole operation if there is no way to do it, or to
 * manually copy the source to the destination if this remains allowed, e.g.
 * because file creation is allowed on the destination directory but not direct
 * linking.
 *
 * To achieve this goal, the kernel needs to compare two file hierarchies: the
 * one identifying the source file or directory (including itself), and the
 * destination one. This can be seen as a multilayer partial ordering problem.
 * The kernel walks through these paths and collects in a matrix the access
 * rights that are denied per layer. These matrices are then compared to see
 * if the destination one has more (or the same) restrictions as the source
 * one. If this is the case, the requested action will not return EXDEV, which
 * doesn't mean the action is allowed. The parent hierarchy of the source
 * (i.e. parent directory), and the destination hierarchy must also be checked
 * to verify that they explicitly allow such action (i.e. referencing,
 * creation and potentially removal rights). The kernel implementation is then
 * required to rely on potentially four matrices of access rights: one for the
 * source file or directory (i.e. the child), potentially another one for the
 * other source/destination (in case of RENAME_EXCHANGE), one for the source
 * parent hierarchy, and a last one for the destination hierarchy. These
 * ephemeral matrices take some space on the stack, which limits the number of
 * layers to a deemed reasonable number: 16.
 *
 * Returns:
 * - 0 if access is allowed;
 * - -EXDEV if @old_dentry would inherit new access rights from @new_dir;
 * - -EACCES if file removal or creation is denied.
 */
static int current_check_refer_path(struct dentry *const old_dentry,
				    const struct path *const new_dir,
				    struct dentry *const new_dentry,
				    const bool removable, const bool exchange)
{
	const struct landlock_cred_security *const subject =
		landlock_get_applicable_subject(current_cred(), any_fs, NULL);
	bool allow_parent1, allow_parent2;
	access_mask_t access_request_parent1, access_request_parent2;
	struct path mnt_dir;
	struct dentry *old_parent;
	layer_mask_t layer_masks_parent1[LANDLOCK_NUM_ACCESS_FS] = {},
		     layer_masks_parent2[LANDLOCK_NUM_ACCESS_FS] = {};
	struct landlock_request request1 = {}, request2 = {};

	if (!subject)
		return 0;

	if (unlikely(d_is_negative(old_dentry)))
		return -ENOENT;
	if (exchange) {
		if (unlikely(d_is_negative(new_dentry)))
			return -ENOENT;
		access_request_parent1 =
			get_mode_access(d_backing_inode(new_dentry)->i_mode);
	} else {
		access_request_parent1 = 0;
	}
	access_request_parent2 =
		get_mode_access(d_backing_inode(old_dentry)->i_mode);
	if (removable) {
		access_request_parent1 |= maybe_remove(old_dentry);
		access_request_parent2 |= maybe_remove(new_dentry);
	}

	/* The mount points are the same for old and new paths, cf. EXDEV. */
	if (old_dentry->d_parent == new_dir->dentry) {
		/*
		 * The LANDLOCK_ACCESS_FS_REFER access right is not required
		 * for a same-directory rename or link (i.e. no reparenting).
		 */
		access_request_parent1 = landlock_init_layer_masks(
			subject->domain,
			access_request_parent1 | access_request_parent2,
			&layer_masks_parent1, LANDLOCK_KEY_INODE);
		if (is_access_to_paths_allowed(subject->domain, new_dir,
					       access_request_parent1,
					       &layer_masks_parent1, &request1,
					       NULL, 0, NULL, NULL, NULL))
			return 0;

		landlock_log_denial(subject, &request1);
		return -EACCES;
	}

	access_request_parent1 |= LANDLOCK_ACCESS_FS_REFER;
	access_request_parent2 |= LANDLOCK_ACCESS_FS_REFER;

	/* Saves the common mount point. */
	mnt_dir.mnt = new_dir->mnt;
	mnt_dir.dentry = new_dir->mnt->mnt_root;

	/*
	 * old_dentry may be the root of the common mount point and
	 * !IS_ROOT(old_dentry) at the same time (e.g. with open_tree() and
	 * OPEN_TREE_CLONE). We do not need to call dget(old_parent) because
	 * we keep a reference to old_dentry.
	 */
	old_parent = (old_dentry == mnt_dir.dentry) ? old_dentry :
						      old_dentry->d_parent;

	/* new_dir->dentry is equal to new_dentry->d_parent */
	allow_parent1 = collect_domain_accesses(subject->domain, mnt_dir.dentry,
						old_parent,
						&layer_masks_parent1);
	allow_parent2 = collect_domain_accesses(subject->domain, mnt_dir.dentry,
						new_dir->dentry,
						&layer_masks_parent2);

	if (allow_parent1 && allow_parent2)
		return 0;

	/*
	 * To be able to compare source and destination domain access rights,
	 * take into account the @old_dentry access rights aggregated with its
	 * parent access rights. This will be useful to compare with the
	 * destination parent access rights.
	 */
	if (is_access_to_paths_allowed(
		    subject->domain, &mnt_dir, access_request_parent1,
		    &layer_masks_parent1, &request1, old_dentry,
		    access_request_parent2, &layer_masks_parent2, &request2,
		    exchange ? new_dentry : NULL))
		return 0;

	if (request1.access) {
		request1.audit.u.path.dentry = old_parent;
		landlock_log_denial(subject, &request1);
	}
	if (request2.access) {
		request2.audit.u.path.dentry = new_dir->dentry;
		landlock_log_denial(subject, &request2);
	}

	/*
	 * This prioritizes EACCES over EXDEV for all actions, including
	 * renames with RENAME_EXCHANGE.
	 */
	if (likely(is_eacces(&layer_masks_parent1, access_request_parent1) ||
		   is_eacces(&layer_masks_parent2, access_request_parent2)))
		return -EACCES;

	/*
	 * Gracefully forbids reparenting if the destination directory
	 * hierarchy is not a superset of restrictions of the source directory
	 * hierarchy, or if LANDLOCK_ACCESS_FS_REFER is not allowed by the
	 * source or the destination.
	 */
	return -EXDEV;
}
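
/*
 * From user space, a robust mover can rely on the errno distinction
 * documented above. A hedged sketch, with a hypothetical copy_then_unlink()
 * fallback:
 *
 *	if (rename(src, dst) == 0)
 *		return 0;
 *	if (errno == EXDEV)
 *		return copy_then_unlink(src, dst);	// re-create the file
 *	return -1;	// e.g. EACCES: no way to perform the operation
 */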

/* Inode hooks */

static void hook_inode_free_security_rcu(void *inode_security)
{
	struct landlock_inode_security *inode_sec;

	/*
	 * All inodes must already have been untied from their object by
	 * release_inode() or hook_sb_delete().
	 */
	inode_sec = inode_security + landlock_blob_sizes.lbs_inode;
	WARN_ON_ONCE(inode_sec->object);
}

/* Super-block hooks */

/*
 * Release the inodes used in a security policy.
 *
 * Cf. fsnotify_unmount_inodes() and evict_inodes()
 */
static void hook_sb_delete(struct super_block *const sb)
{
	struct inode *inode, *prev_inode = NULL;

	if (!landlock_initialized)
		return;

	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		struct landlock_object *object;

		/* Only handles referenced inodes. */
		if (!icount_read(inode))
			continue;

		/*
		 * Protects against concurrent modification of inode (e.g.
		 * from get_inode_object()).
		 */
		spin_lock(&inode->i_lock);
		/*
		 * Checks I_FREEING and I_WILL_FREE to protect against a race
		 * condition when release_inode() just called iput(), which
		 * could lead to a NULL dereference of inode->security or a
		 * second call to iput() for the same Landlock object. Also
		 * checks I_NEW because such inode cannot be tied to an object.
		 */
		if (inode_state_read(inode) &
		    (I_FREEING | I_WILL_FREE | I_NEW)) {
			spin_unlock(&inode->i_lock);
			continue;
		}

		rcu_read_lock();
		object = rcu_dereference(landlock_inode(inode)->object);
		if (!object) {
			rcu_read_unlock();
			spin_unlock(&inode->i_lock);
			continue;
		}
		/* Keeps a reference to this inode until the next loop walk. */
		__iget(inode);
		spin_unlock(&inode->i_lock);

		/*
		 * If there is no concurrent release_inode() ongoing, then we
		 * are in charge of calling iput() on this inode, otherwise we
		 * will just wait for it to finish.
		 */
		spin_lock(&object->lock);
		if (object->underobj == inode) {
			object->underobj = NULL;
			spin_unlock(&object->lock);
			rcu_read_unlock();

			/*
			 * Because object->underobj was not NULL,
			 * release_inode() and get_inode_object() guarantee
			 * that it is safe to reset
			 * landlock_inode(inode)->object while it is not NULL.
			 * It is therefore not necessary to lock inode->i_lock.
			 */
			rcu_assign_pointer(landlock_inode(inode)->object, NULL);
			/*
			 * At this point, we own the ihold() reference that was
			 * originally set up by get_inode_object() and the
			 * __iget() reference that we just set in this loop
			 * walk. Therefore there are at least two references
			 * on the inode.
			 */
			iput_not_last(inode);
		} else {
			spin_unlock(&object->lock);
			rcu_read_unlock();
		}

		if (prev_inode) {
			/*
			 * At this point, we still own the __iget() reference
			 * that we just set in this loop walk. Therefore we
			 * can drop the list lock and know that the inode won't
			 * disappear from under us until the next loop walk.
			 */
			spin_unlock(&sb->s_inode_list_lock);
			/*
			 * We can now actually put the inode reference from the
			 * previous loop walk, which is not needed anymore.
			 */
			iput(prev_inode);
			cond_resched();
			spin_lock(&sb->s_inode_list_lock);
		}
		prev_inode = inode;
	}
	spin_unlock(&sb->s_inode_list_lock);

	/* Puts the inode reference from the last loop walk, if any. */
	if (prev_inode)
		iput(prev_inode);
	/* Waits for pending iput() in release_inode(). */
	wait_var_event(&landlock_superblock(sb)->inode_refs,
		       !atomic_long_read(&landlock_superblock(sb)->inode_refs));
}

static void
log_fs_change_topology_path(const struct landlock_cred_security *const subject,
			    size_t handle_layer, const struct path *const path)
{
	landlock_log_denial(subject, &(struct landlock_request) {
		.type = LANDLOCK_REQUEST_FS_CHANGE_TOPOLOGY,
		.audit = {
			.type = LSM_AUDIT_DATA_PATH,
			.u.path = *path,
		},
		.layer_plus_one = handle_layer + 1,
	});
}

static void log_fs_change_topology_dentry(
	const struct landlock_cred_security *const subject, size_t handle_layer,
	struct dentry *const dentry)
{
	landlock_log_denial(subject, &(struct landlock_request) {
		.type = LANDLOCK_REQUEST_FS_CHANGE_TOPOLOGY,
		.audit = {
			.type = LSM_AUDIT_DATA_DENTRY,
			.u.dentry = dentry,
		},
		.layer_plus_one = handle_layer + 1,
	});
}

/*
 * Because a Landlock security policy is defined according to the filesystem
 * topology (i.e. the mount namespace), changing it may grant access to files
 * not previously allowed.
 *
 * To make it simple, deny any filesystem topology modification by landlocked
 * processes. Non-landlocked processes may still change the namespace of a
 * landlocked process, but this kind of threat must be handled by a system-wide
 * access-control security policy.
 *
 * This could be lifted in the future if Landlock can safely handle mount
 * namespace updates requested by a landlocked process. Indeed, we could
 * update the current domain (which is currently read-only) by taking into
 * account the accesses of the source and the destination of a new mount point.
 * However, it would also require making all the child domains dynamically
 * inherit these new constraints. Anyway, for backward compatibility reasons,
 * a dedicated user space option would be required (e.g. as a ruleset flag).
 */
static int hook_sb_mount(const char *const dev_name,
			 const struct path *const path, const char *const type,
			 const unsigned long flags, void *const data)
{
	size_t handle_layer;
	const struct landlock_cred_security *const subject =
		landlock_get_applicable_subject(current_cred(), any_fs,
						&handle_layer);

	if (!subject)
		return 0;

	log_fs_change_topology_path(subject, handle_layer, path);
	return -EPERM;
}

static int hook_move_mount(const struct path *const from_path,
			   const struct path *const to_path)
{
	size_t handle_layer;
	const struct landlock_cred_security *const subject =
		landlock_get_applicable_subject(current_cred(), any_fs,
						&handle_layer);

	if (!subject)
		return 0;

	log_fs_change_topology_path(subject, handle_layer, to_path);
	return -EPERM;
}

/*
 * Removing a mount point may reveal a previously hidden file hierarchy, which
 * may then grant access to files that were previously forbidden.
 */
static int hook_sb_umount(struct vfsmount *const mnt, const int flags)
{
	size_t handle_layer;
	const struct landlock_cred_security *const subject =
		landlock_get_applicable_subject(current_cred(), any_fs,
						&handle_layer);

	if (!subject)
		return 0;

	log_fs_change_topology_dentry(subject, handle_layer, mnt->mnt_root);
	return -EPERM;
}

static int hook_sb_remount(struct super_block *const sb, void *const mnt_opts)
{
	size_t handle_layer;
	const struct landlock_cred_security *const subject =
		landlock_get_applicable_subject(current_cred(), any_fs,
						&handle_layer);

	if (!subject)
		return 0;

	log_fs_change_topology_dentry(subject, handle_layer, sb->s_root);
	return -EPERM;
}

/*
 * pivot_root(2), like mount(2), changes the current mount namespace. It must
 * then be forbidden for a landlocked process.
 *
 * However, chroot(2) may be allowed because it only changes the relative root
 * directory of the current process. Moreover, it can be used to restrict the
 * view of the filesystem.
 */
static int hook_sb_pivotroot(const struct path *const old_path,
			     const struct path *const new_path)
{
	size_t handle_layer;
	const struct landlock_cred_security *const subject =
		landlock_get_applicable_subject(current_cred(), any_fs,
						&handle_layer);

	if (!subject)
		return 0;

	log_fs_change_topology_path(subject, handle_layer, new_path);
	return -EPERM;
}

/* Path hooks */

static int hook_path_link(struct dentry *const old_dentry,
			  const struct path *const new_dir,
			  struct dentry *const new_dentry)
{
	return current_check_refer_path(old_dentry, new_dir, new_dentry, false,
					false);
}

static int hook_path_rename(const struct path *const old_dir,
			    struct dentry *const old_dentry,
			    const struct path *const new_dir,
			    struct dentry *const new_dentry,
			    const unsigned int flags)
{
	/* old_dir refers to old_dentry->d_parent and new_dir->mnt */
	return current_check_refer_path(old_dentry, new_dir, new_dentry, true,
					!!(flags & RENAME_EXCHANGE));
}

static int hook_path_mkdir(const struct path *const dir,
			   struct dentry *const dentry, const umode_t mode)
{
	return current_check_access_path(dir, LANDLOCK_ACCESS_FS_MAKE_DIR);
}

static int hook_path_mknod(const struct path *const dir,
			   struct dentry *const dentry, const umode_t mode,
			   const unsigned int dev)
{
	return current_check_access_path(dir, get_mode_access(mode));
}

static int hook_path_symlink(const struct path *const dir,
			     struct dentry *const dentry,
			     const char *const old_name)
{
	return current_check_access_path(dir, LANDLOCK_ACCESS_FS_MAKE_SYM);
}

static int hook_path_unlink(const struct path *const dir,
			    struct dentry *const dentry)
{
	return current_check_access_path(dir, LANDLOCK_ACCESS_FS_REMOVE_FILE);
}

static int hook_path_rmdir(const struct path *const dir,
			   struct dentry *const dentry)
{
	return current_check_access_path(dir, LANDLOCK_ACCESS_FS_REMOVE_DIR);
}

static int hook_path_truncate(const struct path *const path)
{
	return current_check_access_path(path, LANDLOCK_ACCESS_FS_TRUNCATE);
}

/* File hooks */

/**
 * get_required_file_open_access - Get access needed to open a file
 *
 * @file: File being opened.
 *
 * Returns the access rights that are required for opening the given file,
 * depending on the file type and open mode.
 */
static access_mask_t
get_required_file_open_access(const struct file *const file)
{
	access_mask_t access = 0;

	if (file->f_mode & FMODE_READ) {
		/* A directory can only be opened in read mode. */
		if (S_ISDIR(file_inode(file)->i_mode))
			return LANDLOCK_ACCESS_FS_READ_DIR;
		access = LANDLOCK_ACCESS_FS_READ_FILE;
	}
	if (file->f_mode & FMODE_WRITE)
		access |= LANDLOCK_ACCESS_FS_WRITE_FILE;
	/* __FMODE_EXEC is indeed part of f_flags, not f_mode. */
	if (file->f_flags & __FMODE_EXEC)
		access |= LANDLOCK_ACCESS_FS_EXECUTE;
	return access;
}
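
/*
 * For instance, open("f", O_RDWR) requires LANDLOCK_ACCESS_FS_READ_FILE |
 * LANDLOCK_ACCESS_FS_WRITE_FILE, opening a directory with opendir(3) requires
 * LANDLOCK_ACCESS_FS_READ_DIR, and execve(2) adds LANDLOCK_ACCESS_FS_EXECUTE
 * through __FMODE_EXEC.
 */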

static int hook_file_alloc_security(struct file *const file)
{
        /*
         * Grants all access rights, even if most of them are not checked
         * later on: this is the more consistent default.
         *
         * Notably, file descriptors for regular files can also be acquired
         * without going through the file_open hook, for example when using
         * memfd_create(2).
         */
        landlock_file(file)->allowed_access = LANDLOCK_MASK_ACCESS_FS;
        return 0;
}
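
/*
 * As a consequence (hypothetical user-space sketch), a Landlocked task can
 * still do:
 *
 *	int fd = memfd_create("buf", MFD_CLOEXEC);
 *	ftruncate(fd, 4096);	// allowed
 *
 * because such an anonymous file never goes through hook_file_open() and
 * therefore keeps the full allowed_access set granted here.
 */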

static bool is_device(const struct file *const file)
{
        const struct inode *inode = file_inode(file);

        return S_ISBLK(inode->i_mode) || S_ISCHR(inode->i_mode);
}

static int hook_file_open(struct file *const file)
{
        layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {};
        access_mask_t open_access_request, full_access_request, allowed_access,
                optional_access;
        const struct landlock_cred_security *const subject =
                landlock_get_applicable_subject(file->f_cred, any_fs, NULL);
        struct landlock_request request = {};

        if (!subject)
                return 0;

        /*
         * Because a file may be opened with O_PATH,
         * get_required_file_open_access() may return 0. This case will be
         * handled with a future Landlock evolution.
         */
        open_access_request = get_required_file_open_access(file);

        /*
         * We look up more access than what we immediately need for open(), so
         * that we can later authorize operations on opened files.
         */
        optional_access = LANDLOCK_ACCESS_FS_TRUNCATE;
        if (is_device(file))
                optional_access |= LANDLOCK_ACCESS_FS_IOCTL_DEV;

        full_access_request = open_access_request | optional_access;

        if (is_access_to_paths_allowed(
                    subject->domain, &file->f_path,
                    landlock_init_layer_masks(subject->domain,
                                              full_access_request, &layer_masks,
                                              LANDLOCK_KEY_INODE),
                    &layer_masks, &request, NULL, 0, NULL, NULL, NULL)) {
                allowed_access = full_access_request;
        } else {
                unsigned long access_bit;
                const unsigned long access_req = full_access_request;

                /*
                 * Calculates the actual allowed access rights from
                 * layer_masks: adds to allowed_access each requested access
                 * right that has not been vetoed by any layer.
                 */
                allowed_access = 0;
                for_each_set_bit(access_bit, &access_req,
                                 ARRAY_SIZE(layer_masks)) {
                        if (!layer_masks[access_bit])
                                allowed_access |= BIT_ULL(access_bit);
                }
        }
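
        /*
         * For instance, if one layer handles and denies
         * LANDLOCK_ACCESS_FS_TRUNCATE for this path (its layer_masks entry
         * stays nonzero) while every layer grants
         * LANDLOCK_ACCESS_FS_READ_FILE (its entry was cleared), then
         * allowed_access keeps only the READ_FILE bit of the request.
         */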

        /*
         * For operations on already opened files (e.g. ftruncate()), it is the
         * access rights at the time of open() which decide whether the
         * operation is permitted. Therefore, we record the relevant subset of
         * file access rights in the opened struct file.
         */
        landlock_file(file)->allowed_access = allowed_access;
#ifdef CONFIG_AUDIT
        landlock_file(file)->deny_masks = landlock_get_deny_masks(
                _LANDLOCK_ACCESS_FS_OPTIONAL, optional_access, &layer_masks,
                ARRAY_SIZE(layer_masks));
#endif /* CONFIG_AUDIT */

        if ((open_access_request & allowed_access) == open_access_request)
                return 0;

        /* Sets access to reflect the actual request. */
        request.access = open_access_request;
        landlock_log_denial(subject, &request);
        return -EACCES;
}
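
/*
 * The optional rights are looked up but never fail the open itself; a
 * user-space sketch of the resulting behavior (hypothetical ruleset that
 * grants READ_FILE and WRITE_FILE but not TRUNCATE on "f"):
 *
 *	int fd = open("f", O_RDWR);	// succeeds
 *	ftruncate(fd, 0);		// fails with EACCES, see below
 */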

static int hook_file_truncate(struct file *const file)
{
        /*
         * Allows truncation if the truncate right was available at the time of
         * opening the file, to get a consistent access check as for read, write
         * and execute operations.
         *
         * Note: For checks done based on the file's Landlock allowed access, we
         * enforce them independently of whether the current thread is in a
         * Landlock domain, so that open files passed between independent
         * processes retain their behaviour.
         */
        if (landlock_file(file)->allowed_access & LANDLOCK_ACCESS_FS_TRUNCATE)
                return 0;

        landlock_log_denial(landlock_cred(file->f_cred), &(struct landlock_request) {
                .type = LANDLOCK_REQUEST_FS_ACCESS,
                .audit = {
                        .type = LSM_AUDIT_DATA_FILE,
                        .u.file = file,
                },
                .all_existing_optional_access = _LANDLOCK_ACCESS_FS_OPTIONAL,
                .access = LANDLOCK_ACCESS_FS_TRUNCATE,
#ifdef CONFIG_AUDIT
                .deny_masks = landlock_file(file)->deny_masks,
#endif /* CONFIG_AUDIT */
        });
        return -EACCES;
}
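
/*
 * "Passed between independent processes" covers e.g. SCM_RIGHTS: if an
 * unconfined process opens a file and hands the descriptor to a confined one
 * over a UNIX socket, ftruncate() in the receiver still succeeds because
 * LANDLOCK_ACCESS_FS_TRUNCATE was recorded at open time; symmetrically, a
 * descriptor opened without that right stays truncate-restricted even in an
 * unconfined receiver (illustrative consequence of the recorded
 * allowed_access, not an additional check).
 */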

static int hook_file_ioctl_common(const struct file *const file,
                                  const unsigned int cmd, const bool is_compat)
{
        access_mask_t allowed_access = landlock_file(file)->allowed_access;

        /*
         * It is the access rights at the time of opening the file which
         * determine whether IOCTL can be used on the opened file later.
         *
         * The access right is attached to the opened file in hook_file_open().
         */
        if (allowed_access & LANDLOCK_ACCESS_FS_IOCTL_DEV)
                return 0;

        if (!is_device(file))
                return 0;

        if (unlikely(is_compat) ? is_masked_device_ioctl_compat(cmd) :
                                  is_masked_device_ioctl(cmd))
                return 0;

        landlock_log_denial(landlock_cred(file->f_cred), &(struct landlock_request) {
                .type = LANDLOCK_REQUEST_FS_ACCESS,
                .audit = {
                        .type = LSM_AUDIT_DATA_IOCTL_OP,
                        .u.op = &(struct lsm_ioctlop_audit) {
                                .path = file->f_path,
                                .cmd = cmd,
                        },
                },
                .all_existing_optional_access = _LANDLOCK_ACCESS_FS_OPTIONAL,
                .access = LANDLOCK_ACCESS_FS_IOCTL_DEV,
#ifdef CONFIG_AUDIT
                .deny_masks = landlock_file(file)->deny_masks,
#endif /* CONFIG_AUDIT */
        });
        return -EACCES;
}
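
/*
 * For example, FIOCLEX and FIONBIO are implemented in fs/ioctl.c's
 * do_vfs_ioctl() and are therefore covered by is_masked_device_ioctl(): they
 * keep working on device files even without LANDLOCK_ACCESS_FS_IOCTL_DEV.
 */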

static int hook_file_ioctl(struct file *file, unsigned int cmd,
                           unsigned long arg)
{
        return hook_file_ioctl_common(file, cmd, false);
}

static int hook_file_ioctl_compat(struct file *file, unsigned int cmd,
                                  unsigned long arg)
{
        return hook_file_ioctl_common(file, cmd, true);
}

/*
 * Always allow sending signals between threads of the same process. This
 * ensures consistency with hook_task_kill().
 */
static bool control_current_fowner(struct fown_struct *const fown)
{
        struct task_struct *p;

        /*
         * Lock already held by __f_setown(), see commit 26f204380a3c ("fs: Fix
         * file_set_fowner LSM hook inconsistencies").
         */
        lockdep_assert_held(&fown->lock);

        /*
         * Some callers (e.g. fcntl_dirnotify) may not be in an RCU read-side
         * critical section.
         */
        guard(rcu)();
        p = pid_task(fown->pid, fown->pid_type);
        if (!p)
                return true;

        return !same_thread_group(p, current);
}

static void hook_file_set_fowner(struct file *file)
{
        struct landlock_ruleset *prev_dom;
        struct landlock_cred_security fown_subject = {};
        size_t fown_layer = 0;

        if (control_current_fowner(file_f_owner(file))) {
                static const struct access_masks signal_scope = {
                        .scope = LANDLOCK_SCOPE_SIGNAL,
                };
                const struct landlock_cred_security *new_subject =
                        landlock_get_applicable_subject(
                                current_cred(), signal_scope, &fown_layer);
                if (new_subject) {
                        landlock_get_ruleset(new_subject->domain);
                        fown_subject = *new_subject;
                }
        }

        prev_dom = landlock_file(file)->fown_subject.domain;
        landlock_file(file)->fown_subject = fown_subject;
#ifdef CONFIG_AUDIT
        landlock_file(file)->fown_layer = fown_layer;
#endif /* CONFIG_AUDIT */

        /* May be called in an RCU read-side critical section. */
        landlock_put_ruleset_deferred(prev_dom);
}
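
/*
 * In other words, fcntl(fd, F_SETOWN, ...) snapshots the caller's domain
 * here, so that a later owner signal delivered through this file (e.g.
 * SIGIO) can be checked against the signal scope of the task that set the
 * owner; sketch of the recorded state only, the check itself happens
 * outside this file.
 */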

static void hook_file_free_security(struct file *file)
{
        landlock_put_ruleset_deferred(landlock_file(file)->fown_subject.domain);
}

static struct security_hook_list landlock_hooks[] __ro_after_init = {
        LSM_HOOK_INIT(inode_free_security_rcu, hook_inode_free_security_rcu),

        LSM_HOOK_INIT(sb_delete, hook_sb_delete),
        LSM_HOOK_INIT(sb_mount, hook_sb_mount),
        LSM_HOOK_INIT(move_mount, hook_move_mount),
        LSM_HOOK_INIT(sb_umount, hook_sb_umount),
        LSM_HOOK_INIT(sb_remount, hook_sb_remount),
        LSM_HOOK_INIT(sb_pivotroot, hook_sb_pivotroot),

        LSM_HOOK_INIT(path_link, hook_path_link),
        LSM_HOOK_INIT(path_rename, hook_path_rename),
        LSM_HOOK_INIT(path_mkdir, hook_path_mkdir),
        LSM_HOOK_INIT(path_mknod, hook_path_mknod),
        LSM_HOOK_INIT(path_symlink, hook_path_symlink),
        LSM_HOOK_INIT(path_unlink, hook_path_unlink),
        LSM_HOOK_INIT(path_rmdir, hook_path_rmdir),
        LSM_HOOK_INIT(path_truncate, hook_path_truncate),

        LSM_HOOK_INIT(file_alloc_security, hook_file_alloc_security),
        LSM_HOOK_INIT(file_open, hook_file_open),
        LSM_HOOK_INIT(file_truncate, hook_file_truncate),
        LSM_HOOK_INIT(file_ioctl, hook_file_ioctl),
        LSM_HOOK_INIT(file_ioctl_compat, hook_file_ioctl_compat),
        LSM_HOOK_INIT(file_set_fowner, hook_file_set_fowner),
        LSM_HOOK_INIT(file_free_security, hook_file_free_security),
};

__init void landlock_add_fs_hooks(void)
{
        security_add_hooks(landlock_hooks, ARRAY_SIZE(landlock_hooks),
                           &landlock_lsmid);
}

#ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST

/* clang-format off */
static struct kunit_case test_cases[] = {
        KUNIT_CASE(test_no_more_access),
        KUNIT_CASE(test_scope_to_request_with_exec_none),
        KUNIT_CASE(test_scope_to_request_with_exec_some),
        KUNIT_CASE(test_scope_to_request_without_access),
        KUNIT_CASE(test_is_eacces_with_none),
        KUNIT_CASE(test_is_eacces_with_refer),
        KUNIT_CASE(test_is_eacces_with_write),
        {}
};
/* clang-format on */

static struct kunit_suite test_suite = {
        .name = "landlock_fs",
        .test_cases = test_cases,
};

kunit_test_suite(test_suite);

#endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */