GitHub Repository: torvalds/linux
Path: blob/master/security/apparmor/lsm.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * AppArmor security module
 *
 * This file contains AppArmor LSM hooks.
 *
 * Copyright (C) 1998-2008 Novell/SUSE
 * Copyright 2009-2010 Canonical Ltd.
 */

#include <linux/lsm_hooks.h>
#include <linux/moduleparam.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/ptrace.h>
#include <linux/ctype.h>
#include <linux/sysctl.h>
#include <linux/audit.h>
#include <linux/user_namespace.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/zstd.h>
#include <net/sock.h>
#include <uapi/linux/mount.h>
#include <uapi/linux/lsm.h>

#include "include/af_unix.h"
#include "include/apparmor.h"
#include "include/apparmorfs.h"
#include "include/audit.h"
#include "include/capability.h"
#include "include/cred.h"
#include "include/file.h"
#include "include/ipc.h"
#include "include/net.h"
#include "include/path.h"
#include "include/label.h"
#include "include/policy.h"
#include "include/policy_ns.h"
#include "include/procattr.h"
#include "include/mount.h"
#include "include/secid.h"

/* Flag indicating whether initialization completed */
int apparmor_initialized;

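/*
 * Path-name buffer cache: a free aa_buffer is overlaid with a list_head
 * so it can be queued either on the global aa_global_buffers list
 * (protected by aa_buffers_lock) or on a per-CPU aa_local_cache;
 * reserve_count and buffer_count track the global pool.
 */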
union aa_buffer {
	struct list_head list;
	DECLARE_FLEX_ARRAY(char, buffer);
};

struct aa_local_cache {
	unsigned int hold;
	unsigned int count;
	struct list_head head;
};

#define RESERVE_COUNT 2
static int reserve_count = RESERVE_COUNT;
static int buffer_count;

static LIST_HEAD(aa_global_buffers);
static DEFINE_SPINLOCK(aa_buffers_lock);
static DEFINE_PER_CPU(struct aa_local_cache, aa_local_buffers);

/*
 * LSM hook functions
 */

/*
 * put the associated labels
 */
static void apparmor_cred_free(struct cred *cred)
{
	aa_put_label(cred_label(cred));
	set_cred_label(cred, NULL);
}

/*
 * allocate the apparmor part of blank credentials
 */
static int apparmor_cred_alloc_blank(struct cred *cred, gfp_t gfp)
{
	set_cred_label(cred, NULL);
	return 0;
}

/*
 * prepare new cred label for modification by prepare_cred block
 */
static int apparmor_cred_prepare(struct cred *new, const struct cred *old,
				 gfp_t gfp)
{
	set_cred_label(new, aa_get_newest_label(cred_label(old)));
	return 0;
}

/*
 * transfer the apparmor data to a blank set of creds
 */
static void apparmor_cred_transfer(struct cred *new, const struct cred *old)
{
	set_cred_label(new, aa_get_newest_label(cred_label(old)));
}

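/*
 * task blob management: release the per-task context when a task is
 * freed and duplicate the current task's context into a new task.
 */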
static void apparmor_task_free(struct task_struct *task)
{

	aa_free_task_ctx(task_ctx(task));
}

static int apparmor_task_alloc(struct task_struct *task,
			       unsigned long clone_flags)
{
	struct aa_task_ctx *new = task_ctx(task);

	aa_dup_task_ctx(new, task_ctx(current));

	return 0;
}

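/*
 * ptrace checks are done against the newest labels of tracer and
 * tracee; the requested mode maps to AA_PTRACE_READ or AA_PTRACE_TRACE.
 */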
static int apparmor_ptrace_access_check(struct task_struct *child,
					unsigned int mode)
{
	struct aa_label *tracer, *tracee;
	const struct cred *cred;
	int error;
	bool needput;

	cred = get_task_cred(child);
	tracee = cred_label(cred);	/* ref count on cred */
	tracer = __begin_current_label_crit_section(&needput);
	error = aa_may_ptrace(current_cred(), tracer, cred, tracee,
			      (mode & PTRACE_MODE_READ) ? AA_PTRACE_READ
							: AA_PTRACE_TRACE);
	__end_current_label_crit_section(tracer, needput);
	put_cred(cred);

	return error;
}

static int apparmor_ptrace_traceme(struct task_struct *parent)
{
	struct aa_label *tracer, *tracee;
	const struct cred *cred;
	int error;
	bool needput;

	tracee = __begin_current_label_crit_section(&needput);
	cred = get_task_cred(parent);
	tracer = cred_label(cred);	/* ref count on cred */
	error = aa_may_ptrace(cred, tracer, current_cred(), tracee,
			      AA_PTRACE_TRACE);
	put_cred(cred);
	__end_current_label_crit_section(tracee, needput);

	return error;
}

/* Derived from security/commoncap.c:cap_capget */
static int apparmor_capget(const struct task_struct *target, kernel_cap_t *effective,
			   kernel_cap_t *inheritable, kernel_cap_t *permitted)
{
	struct aa_label *label;
	const struct cred *cred;

	rcu_read_lock();
	cred = __task_cred(target);
	label = aa_get_newest_cred_label(cred);

	/*
	 * cap_capget is stacked ahead of this and will
	 * initialize effective and permitted.
	 */
	if (!unconfined(label)) {
		struct aa_profile *profile;
		struct label_it i;

		label_for_each_confined(i, label, profile) {
			kernel_cap_t allowed;

			allowed = aa_profile_capget(profile);
			*effective = cap_intersect(*effective, allowed);
			*permitted = cap_intersect(*permitted, allowed);
		}
	}
	rcu_read_unlock();
	aa_put_label(label);

	return 0;
}

static int apparmor_capable(const struct cred *cred, struct user_namespace *ns,
			    int cap, unsigned int opts)
{
	struct aa_label *label;
	int error = 0;

	label = aa_get_newest_cred_label(cred);
	if (!unconfined(label))
		error = aa_capable(cred, label, cap, opts);
	aa_put_label(label);

	return error;
}

/**
 * common_perm - basic common permission check wrapper fn for paths
 * @op: operation being checked
 * @path: path to check permission of (NOT NULL)
 * @mask: requested permissions mask
 * @cond: conditional info for the permission request (NOT NULL)
 *
 * Returns: %0 else error code if error or permission denied
 */
static int common_perm(const char *op, const struct path *path, u32 mask,
		       struct path_cond *cond)
{
	struct aa_label *label;
	int error = 0;
	bool needput;

	label = __begin_current_label_crit_section(&needput);
	if (!unconfined(label))
		error = aa_path_perm(op, current_cred(), label, path, 0, mask,
				     cond);
	__end_current_label_crit_section(label, needput);

	return error;
}

/**
 * common_perm_cond - common permission wrapper around inode cond
 * @op: operation being checked
 * @path: location to check (NOT NULL)
 * @mask: requested permissions mask
 *
 * Returns: %0 else error code if error or permission denied
 */
static int common_perm_cond(const char *op, const struct path *path, u32 mask)
{
	vfsuid_t vfsuid = i_uid_into_vfsuid(mnt_idmap(path->mnt),
					    d_backing_inode(path->dentry));
	struct path_cond cond = {
		vfsuid_into_kuid(vfsuid),
		d_backing_inode(path->dentry)->i_mode
	};

	if (!path_mediated_fs(path->dentry))
		return 0;

	return common_perm(op, path, mask, &cond);
}

/**
 * common_perm_dir_dentry - common permission wrapper when path is dir, dentry
 * @op: operation being checked
 * @dir: directory of the dentry (NOT NULL)
 * @dentry: dentry to check (NOT NULL)
 * @mask: requested permissions mask
 * @cond: conditional info for the permission request (NOT NULL)
 *
 * Returns: %0 else error code if error or permission denied
 */
static int common_perm_dir_dentry(const char *op, const struct path *dir,
				  struct dentry *dentry, u32 mask,
				  struct path_cond *cond)
{
	struct path path = { .mnt = dir->mnt, .dentry = dentry };

	return common_perm(op, &path, mask, cond);
}

/**
 * common_perm_rm - common permission wrapper for operations doing rm
 * @op: operation being checked
 * @dir: directory that the dentry is in (NOT NULL)
 * @dentry: dentry being rm'd (NOT NULL)
 * @mask: requested permission mask
 *
 * Returns: %0 else error code if error or permission denied
 */
static int common_perm_rm(const char *op, const struct path *dir,
			  struct dentry *dentry, u32 mask)
{
	struct inode *inode = d_backing_inode(dentry);
	struct path_cond cond = { };
	vfsuid_t vfsuid;

	if (!inode || !path_mediated_fs(dentry))
		return 0;

	vfsuid = i_uid_into_vfsuid(mnt_idmap(dir->mnt), inode);
	cond.uid = vfsuid_into_kuid(vfsuid);
	cond.mode = inode->i_mode;

	return common_perm_dir_dentry(op, dir, dentry, mask, &cond);
}

/**
 * common_perm_create - common permission wrapper for operations doing create
 * @op: operation being checked
 * @dir: directory that dentry will be created in (NOT NULL)
 * @dentry: dentry to create (NOT NULL)
 * @mask: requested permission mask
 * @mode: created file mode
 *
 * Returns: %0 else error code if error or permission denied
 */
static int common_perm_create(const char *op, const struct path *dir,
			      struct dentry *dentry, u32 mask, umode_t mode)
{
	struct path_cond cond = { current_fsuid(), mode };

	if (!path_mediated_fs(dir->dentry))
		return 0;

	return common_perm_dir_dentry(op, dir, dentry, mask, &cond);
}

static int apparmor_path_unlink(const struct path *dir, struct dentry *dentry)
{
	return common_perm_rm(OP_UNLINK, dir, dentry, AA_MAY_DELETE);
}

static int apparmor_path_mkdir(const struct path *dir, struct dentry *dentry,
			       umode_t mode)
{
	return common_perm_create(OP_MKDIR, dir, dentry, AA_MAY_CREATE,
				  S_IFDIR);
}

static int apparmor_path_rmdir(const struct path *dir, struct dentry *dentry)
{
	return common_perm_rm(OP_RMDIR, dir, dentry, AA_MAY_DELETE);
}

static int apparmor_path_mknod(const struct path *dir, struct dentry *dentry,
			       umode_t mode, unsigned int dev)
{
	return common_perm_create(OP_MKNOD, dir, dentry, AA_MAY_CREATE, mode);
}

static int apparmor_path_truncate(const struct path *path)
{
	return common_perm_cond(OP_TRUNC, path, MAY_WRITE | AA_MAY_SETATTR);
}

static int apparmor_file_truncate(struct file *file)
{
	return apparmor_path_truncate(&file->f_path);
}

static int apparmor_path_symlink(const struct path *dir, struct dentry *dentry,
				 const char *old_name)
{
	return common_perm_create(OP_SYMLINK, dir, dentry, AA_MAY_CREATE,
				  S_IFLNK);
}

static int apparmor_path_link(struct dentry *old_dentry, const struct path *new_dir,
			      struct dentry *new_dentry)
{
	struct aa_label *label;
	int error = 0;

	if (!path_mediated_fs(old_dentry))
		return 0;

	label = begin_current_label_crit_section();
	if (!unconfined(label))
		error = aa_path_link(current_cred(), label, old_dentry, new_dir,
				     new_dentry);
	end_current_label_crit_section(label);

	return error;
}

static int apparmor_path_rename(const struct path *old_dir, struct dentry *old_dentry,
				const struct path *new_dir, struct dentry *new_dentry,
				const unsigned int flags)
{
	struct aa_label *label;
	int error = 0;

	if (!path_mediated_fs(old_dentry))
		return 0;
	if ((flags & RENAME_EXCHANGE) && !path_mediated_fs(new_dentry))
		return 0;

	label = begin_current_label_crit_section();
	if (!unconfined(label)) {
		struct mnt_idmap *idmap = mnt_idmap(old_dir->mnt);
		vfsuid_t vfsuid;
		struct path old_path = { .mnt = old_dir->mnt,
					 .dentry = old_dentry };
		struct path new_path = { .mnt = new_dir->mnt,
					 .dentry = new_dentry };
		struct path_cond cond = {
			.mode = d_backing_inode(old_dentry)->i_mode
		};
		vfsuid = i_uid_into_vfsuid(idmap, d_backing_inode(old_dentry));
		cond.uid = vfsuid_into_kuid(vfsuid);

		if (flags & RENAME_EXCHANGE) {
			struct path_cond cond_exchange = {
				.mode = d_backing_inode(new_dentry)->i_mode,
			};
			vfsuid = i_uid_into_vfsuid(idmap, d_backing_inode(old_dentry));
			cond_exchange.uid = vfsuid_into_kuid(vfsuid);

			error = aa_path_perm(OP_RENAME_SRC, current_cred(),
					     label, &new_path, 0,
					     MAY_READ | AA_MAY_GETATTR | MAY_WRITE |
					     AA_MAY_SETATTR | AA_MAY_DELETE,
					     &cond_exchange);
			if (!error)
				error = aa_path_perm(OP_RENAME_DEST, current_cred(),
						     label, &old_path,
						     0, MAY_WRITE | AA_MAY_SETATTR |
						     AA_MAY_CREATE, &cond_exchange);
		}

		if (!error)
			error = aa_path_perm(OP_RENAME_SRC, current_cred(),
					     label, &old_path, 0,
					     MAY_READ | AA_MAY_GETATTR | MAY_WRITE |
					     AA_MAY_SETATTR | AA_MAY_DELETE,
					     &cond);
		if (!error)
			error = aa_path_perm(OP_RENAME_DEST, current_cred(),
					     label, &new_path,
					     0, MAY_WRITE | AA_MAY_SETATTR |
					     AA_MAY_CREATE, &cond);

	}
	end_current_label_crit_section(label);

	return error;
}

static int apparmor_path_chmod(const struct path *path, umode_t mode)
{
	return common_perm_cond(OP_CHMOD, path, AA_MAY_CHMOD);
}

static int apparmor_path_chown(const struct path *path, kuid_t uid, kgid_t gid)
{
	return common_perm_cond(OP_CHOWN, path, AA_MAY_CHOWN);
}

static int apparmor_inode_getattr(const struct path *path)
{
	return common_perm_cond(OP_GETATTR, path, AA_MAY_GETATTR);
}

static int apparmor_file_open(struct file *file)
{
	struct aa_file_ctx *fctx = file_ctx(file);
	struct aa_label *label;
	int error = 0;
	bool needput;

	if (!path_mediated_fs(file->f_path.dentry))
		return 0;

	/* If in exec, permission is handled by bprm hooks.
	 * Cache permissions granted by the previous exec check, with
	 * implicit read and executable mmap which are required to
	 * actually execute the image.
	 *
	 * Illogically, FMODE_EXEC is in f_flags, not f_mode.
	 */
	if (file->f_flags & __FMODE_EXEC) {
		fctx->allow = MAY_EXEC | MAY_READ | AA_EXEC_MMAP;
		return 0;
	}

	label = aa_get_newest_cred_label_condref(file->f_cred, &needput);
	if (!unconfined(label)) {
		struct mnt_idmap *idmap = file_mnt_idmap(file);
		struct inode *inode = file_inode(file);
		vfsuid_t vfsuid;
		struct path_cond cond = {
			.mode = inode->i_mode,
		};
		vfsuid = i_uid_into_vfsuid(idmap, inode);
		cond.uid = vfsuid_into_kuid(vfsuid);

		error = aa_path_perm(OP_OPEN, file->f_cred,
				     label, &file->f_path, 0,
				     aa_map_file_to_perms(file), &cond);
		/* todo cache full allowed permissions set and state */
		fctx->allow = aa_map_file_to_perms(file);
	}
	aa_put_label_condref(label, needput);

	return error;
}

static int apparmor_file_alloc_security(struct file *file)
{
	struct aa_file_ctx *ctx = file_ctx(file);
	struct aa_label *label = begin_current_label_crit_section();

	spin_lock_init(&ctx->lock);
	rcu_assign_pointer(ctx->label, aa_get_label(label));
	end_current_label_crit_section(label);
	return 0;
}

static void apparmor_file_free_security(struct file *file)
{
	struct aa_file_ctx *ctx = file_ctx(file);

	if (ctx)
		aa_put_label(rcu_access_pointer(ctx->label));
}

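/*
 * common_file_perm - revalidate access to an already open file against
 * the current label; files closed during profile transition are backed
 * by aa_null and are refused without re-auditing.
 */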
static int common_file_perm(const char *op, struct file *file, u32 mask,
			    bool in_atomic)
{
	struct aa_label *label;
	int error = 0;
	bool needput;

	/* don't reaudit files closed during inheritance */
	if (unlikely(file->f_path.dentry == aa_null.dentry))
		return -EACCES;

	label = __begin_current_label_crit_section(&needput);
	error = aa_file_perm(op, current_cred(), label, file, mask, in_atomic);
	__end_current_label_crit_section(label, needput);

	return error;
}

static int apparmor_file_receive(struct file *file)
{
	return common_file_perm(OP_FRECEIVE, file, aa_map_file_to_perms(file),
				false);
}

static int apparmor_file_permission(struct file *file, int mask)
{
	return common_file_perm(OP_FPERM, file, mask, false);
}

static int apparmor_file_lock(struct file *file, unsigned int cmd)
{
	u32 mask = AA_MAY_LOCK;

	if (cmd == F_WRLCK)
		mask |= MAY_WRITE;

	return common_file_perm(OP_FLOCK, file, mask, false);
}

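/*
 * common_mmap - map mmap/mprotect protections onto file permissions:
 * PROT_READ -> MAY_READ, shared PROT_WRITE -> MAY_WRITE and
 * PROT_EXEC -> AA_EXEC_MMAP, then revalidate via common_file_perm().
 */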
static int common_mmap(const char *op, struct file *file, unsigned long prot,
		       unsigned long flags, bool in_atomic)
{
	int mask = 0;

	if (!file || !file_ctx(file))
		return 0;

	if (prot & PROT_READ)
		mask |= MAY_READ;
	/*
	 * Private mappings don't require write perms since they don't
	 * write back to the files
	 */
	if ((prot & PROT_WRITE) && !(flags & MAP_PRIVATE))
		mask |= MAY_WRITE;
	if (prot & PROT_EXEC)
		mask |= AA_EXEC_MMAP;

	return common_file_perm(op, file, mask, in_atomic);
}

static int apparmor_mmap_file(struct file *file, unsigned long reqprot,
			      unsigned long prot, unsigned long flags)
{
	return common_mmap(OP_FMMAP, file, prot, flags, GFP_ATOMIC);
}

static int apparmor_file_mprotect(struct vm_area_struct *vma,
				  unsigned long reqprot, unsigned long prot)
{
	return common_mmap(OP_FMPROT, vma->vm_file, prot,
			   !(vma->vm_flags & VM_SHARED) ? MAP_PRIVATE : 0,
			   false);
}

#ifdef CONFIG_IO_URING
static const char *audit_uring_mask(u32 mask)
{
	if (mask & AA_MAY_CREATE_SQPOLL)
		return "sqpoll";
	if (mask & AA_MAY_OVERRIDE_CRED)
		return "override_creds";
	return "";
}

static void audit_uring_cb(struct audit_buffer *ab, void *va)
{
	struct apparmor_audit_data *ad = aad_of_va(va);

	if (ad->request & AA_URING_PERM_MASK) {
		audit_log_format(ab, " requested=\"%s\"",
				 audit_uring_mask(ad->request));
		if (ad->denied & AA_URING_PERM_MASK) {
			audit_log_format(ab, " denied=\"%s\"",
					 audit_uring_mask(ad->denied));
		}
	}
	if (ad->uring.target) {
		audit_log_format(ab, " tcontext=");
		aa_label_xaudit(ab, labels_ns(ad->subj_label),
				ad->uring.target,
				FLAGS_NONE, GFP_ATOMIC);
	}
}

static int profile_uring(struct aa_profile *profile, u32 request,
			 struct aa_label *new, int cap,
			 struct apparmor_audit_data *ad)
{
	unsigned int state;
	struct aa_ruleset *rules;
	int error = 0;

	AA_BUG(!profile);

	rules = profile->label.rules[0];
	state = RULE_MEDIATES(rules, AA_CLASS_IO_URING);
	if (state) {
		struct aa_perms perms = { };

		if (new) {
			aa_label_match(profile, rules, new, state,
				       false, request, &perms);
		} else {
			perms = *aa_lookup_perms(rules->policy, state);
		}
		aa_apply_modes_to_perms(profile, &perms);
		error = aa_check_perms(profile, &perms, request, ad,
				       audit_uring_cb);
	}

	return error;
}

/**
 * apparmor_uring_override_creds - check the requested cred override
 * @new: the target creds
 *
 * Check to see if the current task is allowed to override its credentials
 * to service an io_uring operation.
 */
static int apparmor_uring_override_creds(const struct cred *new)
{
	struct aa_profile *profile;
	struct aa_label *label;
	int error;
	bool needput;
	DEFINE_AUDIT_DATA(ad, LSM_AUDIT_DATA_NONE, AA_CLASS_IO_URING,
			  OP_URING_OVERRIDE);

	ad.uring.target = cred_label(new);
	label = __begin_current_label_crit_section(&needput);
	error = fn_for_each(label, profile,
			profile_uring(profile, AA_MAY_OVERRIDE_CRED,
				      cred_label(new), CAP_SYS_ADMIN, &ad));
	__end_current_label_crit_section(label, needput);

	return error;
}

/**
 * apparmor_uring_sqpoll - check if a io_uring polling thread can be created
 *
 * Check to see if the current task is allowed to create a new io_uring
 * kernel polling thread.
 */
static int apparmor_uring_sqpoll(void)
{
	struct aa_profile *profile;
	struct aa_label *label;
	int error;
	bool needput;
	DEFINE_AUDIT_DATA(ad, LSM_AUDIT_DATA_NONE, AA_CLASS_IO_URING,
			  OP_URING_SQPOLL);

	label = __begin_current_label_crit_section(&needput);
	error = fn_for_each(label, profile,
			profile_uring(profile, AA_MAY_CREATE_SQPOLL,
				      NULL, CAP_SYS_ADMIN, &ad));
	__end_current_label_crit_section(label, needput);

	return error;
}
#endif /* CONFIG_IO_URING */

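/*
 * sb_mount dispatches on the mount flags: remount, bind, propagation
 * type changes and moves are mediated separately from new mounts.
 */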
static int apparmor_sb_mount(const char *dev_name, const struct path *path,
			     const char *type, unsigned long flags, void *data)
{
	struct aa_label *label;
	int error = 0;
	bool needput;

	/* Discard magic */
	if ((flags & MS_MGC_MSK) == MS_MGC_VAL)
		flags &= ~MS_MGC_MSK;

	flags &= ~AA_MS_IGNORE_MASK;

	label = __begin_current_label_crit_section(&needput);
	if (!unconfined(label)) {
		if (flags & MS_REMOUNT)
			error = aa_remount(current_cred(), label, path, flags,
					   data);
		else if (flags & MS_BIND)
			error = aa_bind_mount(current_cred(), label, path,
					      dev_name, flags);
		else if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE |
				  MS_UNBINDABLE))
			error = aa_mount_change_type(current_cred(), label,
						     path, flags);
		else if (flags & MS_MOVE)
			error = aa_move_mount_old(current_cred(), label, path,
						  dev_name);
		else
			error = aa_new_mount(current_cred(), label, dev_name,
					     path, type, flags, data);
	}
	__end_current_label_crit_section(label, needput);

	return error;
}

static int apparmor_move_mount(const struct path *from_path,
			       const struct path *to_path)
{
	struct aa_label *label;
	int error = 0;
	bool needput;

	label = __begin_current_label_crit_section(&needput);
	if (!unconfined(label))
		error = aa_move_mount(current_cred(), label, from_path,
				      to_path);
	__end_current_label_crit_section(label, needput);

	return error;
}

static int apparmor_sb_umount(struct vfsmount *mnt, int flags)
{
	struct aa_label *label;
	int error = 0;
	bool needput;

	label = __begin_current_label_crit_section(&needput);
	if (!unconfined(label))
		error = aa_umount(current_cred(), label, mnt, flags);
	__end_current_label_crit_section(label, needput);

	return error;
}

static int apparmor_sb_pivotroot(const struct path *old_path,
				 const struct path *new_path)
{
	struct aa_label *label;
	int error = 0;

	label = aa_get_current_label();
	if (!unconfined(label))
		error = aa_pivotroot(current_cred(), label, old_path, new_path);
	aa_put_label(label);

	return error;
}

static int apparmor_getselfattr(unsigned int attr, struct lsm_ctx __user *lx,
				u32 *size, u32 flags)
{
	int error = -ENOENT;
	struct aa_task_ctx *ctx = task_ctx(current);
	struct aa_label *label = NULL;
	char *value = NULL;

	switch (attr) {
	case LSM_ATTR_CURRENT:
		label = aa_get_newest_label(cred_label(current_cred()));
		break;
	case LSM_ATTR_PREV:
		if (ctx->previous)
			label = aa_get_newest_label(ctx->previous);
		break;
	case LSM_ATTR_EXEC:
		if (ctx->onexec)
			label = aa_get_newest_label(ctx->onexec);
		break;
	default:
		error = -EOPNOTSUPP;
		break;
	}

	if (label) {
		error = aa_getprocattr(label, &value, false);
		if (error > 0)
			error = lsm_fill_user_ctx(lx, size, value, error,
						  LSM_ID_APPARMOR, 0);
		kfree(value);
	}

	aa_put_label(label);

	if (error < 0)
		return error;
	return 1;
}

static int apparmor_getprocattr(struct task_struct *task, const char *name,
				char **value)
{
	int error = -ENOENT;
	/* released below */
	const struct cred *cred = get_task_cred(task);
	struct aa_task_ctx *ctx = task_ctx(current);
	struct aa_label *label = NULL;

	if (strcmp(name, "current") == 0)
		label = aa_get_newest_label(cred_label(cred));
	else if (strcmp(name, "prev") == 0 && ctx->previous)
		label = aa_get_newest_label(ctx->previous);
	else if (strcmp(name, "exec") == 0 && ctx->onexec)
		label = aa_get_newest_label(ctx->onexec);
	else
		error = -EINVAL;

	if (label)
		error = aa_getprocattr(label, value, true);

	aa_put_label(label);
	put_cred(cred);

	return error;
}

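/*
 * do_setattr parses "<command> <args>" written to the current/exec
 * attributes: "changehat", "permhat", "changeprofile", "permprofile"
 * and "stack" for current; "exec" and "stack" for exec.  Failures are
 * audited as AUDIT_APPARMOR_DENIED.
 */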
static int do_setattr(u64 attr, void *value, size_t size)
{
	char *command, *largs = NULL, *args = value;
	size_t arg_size;
	int error;
	DEFINE_AUDIT_DATA(ad, LSM_AUDIT_DATA_NONE, AA_CLASS_NONE,
			  OP_SETPROCATTR);

	if (size == 0)
		return -EINVAL;

	/* AppArmor requires that the buffer must be null terminated atm */
	if (args[size - 1] != '\0') {
		/* null terminate */
		largs = args = kmalloc(size + 1, GFP_KERNEL);
		if (!args)
			return -ENOMEM;
		memcpy(args, value, size);
		args[size] = '\0';
	}

	error = -EINVAL;
	args = strim(args);
	command = strsep(&args, " ");
	if (!args)
		goto out;
	args = skip_spaces(args);
	if (!*args)
		goto out;

	arg_size = size - (args - (largs ? largs : (char *) value));
	if (attr == LSM_ATTR_CURRENT) {
		if (strcmp(command, "changehat") == 0) {
			error = aa_setprocattr_changehat(args, arg_size,
							 AA_CHANGE_NOFLAGS);
		} else if (strcmp(command, "permhat") == 0) {
			error = aa_setprocattr_changehat(args, arg_size,
							 AA_CHANGE_TEST);
		} else if (strcmp(command, "changeprofile") == 0) {
			error = aa_change_profile(args, AA_CHANGE_NOFLAGS);
		} else if (strcmp(command, "permprofile") == 0) {
			error = aa_change_profile(args, AA_CHANGE_TEST);
		} else if (strcmp(command, "stack") == 0) {
			error = aa_change_profile(args, AA_CHANGE_STACK);
		} else
			goto fail;
	} else if (attr == LSM_ATTR_EXEC) {
		if (strcmp(command, "exec") == 0)
			error = aa_change_profile(args, AA_CHANGE_ONEXEC);
		else if (strcmp(command, "stack") == 0)
			error = aa_change_profile(args, (AA_CHANGE_ONEXEC |
							 AA_CHANGE_STACK));
		else
			goto fail;
	} else
		/* only support the "current" and "exec" process attributes */
		goto fail;

	if (!error)
		error = size;
out:
	kfree(largs);
	return error;

fail:
	ad.subj_label = begin_current_label_crit_section();
	if (attr == LSM_ATTR_CURRENT)
		ad.info = "current";
	else if (attr == LSM_ATTR_EXEC)
		ad.info = "exec";
	else
		ad.info = "invalid";
	ad.error = error = -EINVAL;
	aa_audit_msg(AUDIT_APPARMOR_DENIED, &ad, NULL);
	end_current_label_crit_section(ad.subj_label);
	goto out;
}

static int apparmor_setselfattr(unsigned int attr, struct lsm_ctx *ctx,
				u32 size, u32 flags)
{
	int rc;

	if (attr != LSM_ATTR_CURRENT && attr != LSM_ATTR_EXEC)
		return -EOPNOTSUPP;

	rc = do_setattr(attr, ctx->ctx, ctx->ctx_len);
	if (rc > 0)
		return 0;
	return rc;
}

static int apparmor_setprocattr(const char *name, void *value,
				size_t size)
{
	int attr = lsm_name_to_attr(name);

	if (attr)
		return do_setattr(attr, value, size);
	return -EINVAL;
}

/**
 * apparmor_bprm_committing_creds - do task cleanup on committing new creds
 * @bprm: binprm for the exec (NOT NULL)
 */
static void apparmor_bprm_committing_creds(const struct linux_binprm *bprm)
{
	struct aa_label *label = aa_current_raw_label();
	struct aa_label *new_label = cred_label(bprm->cred);

	/* bail out if unconfined or not changing profile */
	if ((new_label->proxy == label->proxy) ||
	    (unconfined(new_label)))
		return;

	aa_inherit_files(bprm->cred, current->files);

	current->pdeath_signal = 0;

	/* reset soft limits and set hard limits for the new label */
	__aa_transition_rlimits(label, new_label);
}

/**
 * apparmor_bprm_committed_creds() - do cleanup after new creds committed
 * @bprm: binprm for the exec (NOT NULL)
 */
static void apparmor_bprm_committed_creds(const struct linux_binprm *bprm)
{
	/* clear out temporary/transitional state from the context */
	aa_clear_task_ctx_trans(task_ctx(current));

	return;
}

static void apparmor_current_getlsmprop_subj(struct lsm_prop *prop)
{
	struct aa_label *label;
	bool needput;

	label = __begin_current_label_crit_section(&needput);
	prop->apparmor.label = label;
	__end_current_label_crit_section(label, needput);
}

static void apparmor_task_getlsmprop_obj(struct task_struct *p,
					 struct lsm_prop *prop)
{
	struct aa_label *label = aa_get_task_label(p);

	prop->apparmor.label = label;
	aa_put_label(label);
}

static int apparmor_task_setrlimit(struct task_struct *task,
				   unsigned int resource, struct rlimit *new_rlim)
{
	struct aa_label *label;
	int error = 0;
	bool needput;

	label = __begin_current_label_crit_section(&needput);

	if (!unconfined(label))
		error = aa_task_setrlimit(current_cred(), label, task,
					  resource, new_rlim);
	__end_current_label_crit_section(label, needput);

	return error;
}

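/*
 * Signal delivery is checked between the sender's and the target's
 * newest labels; when a subjective cred is supplied it is used in
 * place of the current task's label.
 */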
static int apparmor_task_kill(struct task_struct *target, struct kernel_siginfo *info,
			      int sig, const struct cred *cred)
{
	const struct cred *tc;
	struct aa_label *cl, *tl;
	int error;
	bool needput;

	tc = get_task_cred(target);
	tl = aa_get_newest_cred_label(tc);
	if (cred) {
		/*
		 * Dealing with USB IO specific behavior
		 */
		cl = aa_get_newest_cred_label(cred);
		error = aa_may_signal(cred, cl, tc, tl, sig);
		aa_put_label(cl);
	} else {
		cl = __begin_current_label_crit_section(&needput);
		error = aa_may_signal(current_cred(), cl, tc, tl, sig);
		__end_current_label_crit_section(cl, needput);
	}
	aa_put_label(tl);
	put_cred(tc);

	return error;
}

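/*
 * Mediate creation of user namespaces by confined tasks via the
 * AA_CLASS_NS ruleset (AA_USERNS_CREATE).
 */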
static int apparmor_userns_create(const struct cred *cred)
{
	struct aa_label *label;
	struct aa_profile *profile;
	int error = 0;
	DEFINE_AUDIT_DATA(ad, LSM_AUDIT_DATA_TASK, AA_CLASS_NS,
			  OP_USERNS_CREATE);

	ad.subj_cred = current_cred();

	label = begin_current_label_crit_section();
	if (!unconfined(label)) {
		error = fn_for_each(label, profile,
				    aa_profile_ns_perm(profile, &ad,
						       AA_USERNS_CREATE));
	}
	end_current_label_crit_section(label);

	return error;
}

static int apparmor_sk_alloc_security(struct sock *sk, int family, gfp_t gfp)
{
	struct aa_sk_ctx *ctx = aa_sock(sk);
	struct aa_label *label;
	bool needput;

	label = __begin_current_label_crit_section(&needput);
	//spin_lock_init(&ctx->lock);
	rcu_assign_pointer(ctx->label, aa_get_label(label));
	rcu_assign_pointer(ctx->peer, NULL);
	rcu_assign_pointer(ctx->peer_lastupdate, NULL);
	__end_current_label_crit_section(label, needput);
	return 0;
}

static void apparmor_sk_free_security(struct sock *sk)
{
	struct aa_sk_ctx *ctx = aa_sock(sk);

	/* dead these won't be updated any more */
	aa_put_label(rcu_dereference_protected(ctx->label, true));
	aa_put_label(rcu_dereference_protected(ctx->peer, true));
	aa_put_label(rcu_dereference_protected(ctx->peer_lastupdate, true));
}

/**
 * apparmor_sk_clone_security - clone the sk_security field
 * @sk: sock to have security cloned
 * @newsk: sock getting clone
 */
static void apparmor_sk_clone_security(const struct sock *sk,
				       struct sock *newsk)
{
	struct aa_sk_ctx *ctx = aa_sock(sk);
	struct aa_sk_ctx *new = aa_sock(newsk);

	/* not actually in use yet */
	if (rcu_access_pointer(ctx->label) != rcu_access_pointer(new->label)) {
		aa_put_label(rcu_dereference_protected(new->label, true));
		rcu_assign_pointer(new->label, aa_get_label_rcu(&ctx->label));
	}

	if (rcu_access_pointer(ctx->peer) != rcu_access_pointer(new->peer)) {
		aa_put_label(rcu_dereference_protected(new->peer, true));
		rcu_assign_pointer(new->peer, aa_get_label_rcu(&ctx->peer));
	}

	if (rcu_access_pointer(ctx->peer_lastupdate) != rcu_access_pointer(new->peer_lastupdate)) {
		aa_put_label(rcu_dereference_protected(new->peer_lastupdate, true));
		rcu_assign_pointer(new->peer_lastupdate,
				   aa_get_label_rcu(&ctx->peer_lastupdate));
	}
}

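/*
 * unix_connect_perm checks both directions of an af_unix connection:
 * connect/send/receive for the connecting sock and, for non-fs
 * addresses, accept/send/receive on behalf of the listener.
 */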
static int unix_connect_perm(const struct cred *cred, struct aa_label *label,
			     struct sock *sk, struct sock *peer_sk)
{
	struct aa_sk_ctx *peer_ctx = aa_sock(peer_sk);
	int error;

	error = aa_unix_peer_perm(cred, label, OP_CONNECT,
				  (AA_MAY_CONNECT | AA_MAY_SEND | AA_MAY_RECEIVE),
				  sk, peer_sk,
				  rcu_dereference_protected(peer_ctx->label,
					lockdep_is_held(&unix_sk(peer_sk)->lock)));
	if (!is_unix_fs(peer_sk)) {
		last_error(error,
			   aa_unix_peer_perm(cred,
				rcu_dereference_protected(peer_ctx->label,
					lockdep_is_held(&unix_sk(peer_sk)->lock)),
				OP_CONNECT,
				(AA_MAY_ACCEPT | AA_MAY_SEND | AA_MAY_RECEIVE),
				peer_sk, sk, label));
	}

	return error;
}

/* lockdep check in unix_connect_perm - push sks here to check */
static void unix_connect_peers(struct aa_sk_ctx *sk_ctx,
			       struct aa_sk_ctx *peer_ctx)
{
	/* Cross reference the peer labels for SO_PEERSEC */
	struct aa_label *label = rcu_dereference_protected(sk_ctx->label, true);

	aa_get_label(label);
	aa_put_label(rcu_dereference_protected(peer_ctx->peer,
					       true));
	rcu_assign_pointer(peer_ctx->peer, label);	/* transfer cnt */

	label = aa_get_label(rcu_dereference_protected(peer_ctx->label,
						       true));
	//spin_unlock(&peer_ctx->lock);

	//spin_lock(&sk_ctx->lock);
	aa_put_label(rcu_dereference_protected(sk_ctx->peer,
					       true));
	aa_put_label(rcu_dereference_protected(sk_ctx->peer_lastupdate,
					       true));

	rcu_assign_pointer(sk_ctx->peer, aa_get_label(label));
	rcu_assign_pointer(sk_ctx->peer_lastupdate, label);	/* transfer cnt */
	//spin_unlock(&sk_ctx->lock);
}

/**
 * apparmor_unix_stream_connect - check perms before making unix domain conn
 * @sk: sk attempting to connect
 * @peer_sk: sk that is accepting the connection
 * @newsk: new sk created for this connection
 * peer is locked when this hook is called
 *
 * Return:
 * 0 if connection is permitted
 * error code on denial or failure
 */
static int apparmor_unix_stream_connect(struct sock *sk, struct sock *peer_sk,
					struct sock *newsk)
{
	struct aa_sk_ctx *sk_ctx = aa_sock(sk);
	struct aa_sk_ctx *peer_ctx = aa_sock(peer_sk);
	struct aa_sk_ctx *new_ctx = aa_sock(newsk);
	struct aa_label *label;
	int error;
	bool needput;

	label = __begin_current_label_crit_section(&needput);
	error = unix_connect_perm(current_cred(), label, sk, peer_sk);
	__end_current_label_crit_section(label, needput);

	if (error)
		return error;

	/* newsk doesn't go through post_create, but does go through
	 * security_sk_alloc()
	 */
	rcu_assign_pointer(new_ctx->label,
			   aa_get_label(rcu_dereference_protected(peer_ctx->label,
								  true)));

	/* Cross reference the peer labels for SO_PEERSEC */
	unix_connect_peers(sk_ctx, new_ctx);

	return 0;
}

/**
 * apparmor_unix_may_send - check perms before conn or sending unix dgrams
 * @sock: socket sending the message
 * @peer: socket the message is being sent to
 *
 * Performs bidirectional permission checks for Unix domain socket communication:
 * 1. Verifies sender has AA_MAY_SEND to target socket
 * 2. Verifies receiver has AA_MAY_RECEIVE from source socket
 *
 * sock and peer are locked when this hook is called
 * called by: dgram_connect peer setup but path not copied to newsk
 *
 * Return:
 * 0 if transmission is permitted
 * error code on denial or failure
 */
static int apparmor_unix_may_send(struct socket *sock, struct socket *peer)
{
	struct aa_sk_ctx *peer_ctx = aa_sock(peer->sk);
	struct aa_label *label;
	int error;
	bool needput;

	label = __begin_current_label_crit_section(&needput);
	error = xcheck(aa_unix_peer_perm(current_cred(),
					 label, OP_SENDMSG, AA_MAY_SEND,
					 sock->sk, peer->sk,
					 rcu_dereference_protected(peer_ctx->label,
								   true)),
		       aa_unix_peer_perm(peer->file ? peer->file->f_cred : NULL,
					 rcu_dereference_protected(peer_ctx->label,
								   true),
					 OP_SENDMSG, AA_MAY_RECEIVE, peer->sk,
					 sock->sk, label));
	__end_current_label_crit_section(label, needput);

	return error;
}

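/*
 * Socket creation by kernel sockets is not mediated; af_unix sockets
 * use their own permission encoding, everything else goes through the
 * generic network class via aa_af_perm().
 */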
static int apparmor_socket_create(int family, int type, int protocol, int kern)
{
	struct aa_label *label;
	int error = 0;

	AA_BUG(in_interrupt());

	if (kern)
		return 0;

	label = begin_current_label_crit_section();
	if (!unconfined(label)) {
		if (family == PF_UNIX)
			error = aa_unix_create_perm(label, family, type,
						    protocol);
		else
			error = aa_af_perm(current_cred(), label, OP_CREATE,
					   AA_MAY_CREATE, family, type,
					   protocol);
	}
	end_current_label_crit_section(label);

	return error;
}

/**
 * apparmor_socket_post_create - setup the per-socket security struct
 * @sock: socket that is being setup
 * @family: family of socket being created
 * @type: type of the socket
 * @protocol: protocol of the socket
 * @kern: socket is a special kernel socket
 *
 * Note:
 * - kernel sockets labeled kernel_t used to use unconfined
 * - socket may not have sk here if created with sock_create_lite or
 *   sock_alloc. These should be accept cases which will be handled in
 *   sock_graft.
 */
static int apparmor_socket_post_create(struct socket *sock, int family,
				       int type, int protocol, int kern)
{
	struct aa_label *label;

	if (kern) {
		label = aa_get_label(kernel_t);
	} else
		label = aa_get_current_label();

	if (sock->sk) {
		struct aa_sk_ctx *ctx = aa_sock(sock->sk);

		/* still not live */
		aa_put_label(rcu_dereference_protected(ctx->label, true));
		rcu_assign_pointer(ctx->label, aa_get_label(label));
	}
	aa_put_label(label);

	return 0;
}

static int apparmor_socket_socketpair(struct socket *socka,
				      struct socket *sockb)
{
	struct aa_sk_ctx *a_ctx = aa_sock(socka->sk);
	struct aa_sk_ctx *b_ctx = aa_sock(sockb->sk);
	struct aa_label *label;

	/* socks not live yet - initial values set in sk_alloc */
	label = begin_current_label_crit_section();
	if (rcu_access_pointer(a_ctx->label) != label) {
		AA_BUG("a_ctx != label");
		aa_put_label(rcu_dereference_protected(a_ctx->label, true));
		rcu_assign_pointer(a_ctx->label, aa_get_label(label));
	}
	if (rcu_access_pointer(b_ctx->label) != label) {
		AA_BUG("b_ctx != label");
		aa_put_label(rcu_dereference_protected(b_ctx->label, true));
		rcu_assign_pointer(b_ctx->label, aa_get_label(label));
	}

	if (socka->sk->sk_family == PF_UNIX) {
		/* unix socket pairs by-pass unix_stream_connect */
		unix_connect_peers(a_ctx, b_ctx);
	}
	end_current_label_crit_section(label);

	return 0;
}

/**
 * apparmor_socket_bind - check perms before bind addr to socket
 * @sock: socket to bind the address to (must be non-NULL)
 * @address: address that is being bound (must be non-NULL)
 * @addrlen: length of @address
 *
 * Performs security checks before allowing a socket to bind to an address.
 * Handles Unix domain sockets specially through aa_unix_bind_perm().
 * For other socket families, uses generic permission check via aa_sk_perm().
 *
 * Return:
 * 0 if binding is permitted
 * error code on denial or invalid parameters
 */
static int apparmor_socket_bind(struct socket *sock,
				struct sockaddr *address, int addrlen)
{
	AA_BUG(!sock);
	AA_BUG(!sock->sk);
	AA_BUG(!address);
	AA_BUG(in_interrupt());

	if (sock->sk->sk_family == PF_UNIX)
		return aa_unix_bind_perm(sock, address, addrlen);
	return aa_sk_perm(OP_BIND, AA_MAY_BIND, sock->sk);
}

static int apparmor_socket_connect(struct socket *sock,
				   struct sockaddr *address, int addrlen)
{
	AA_BUG(!sock);
	AA_BUG(!sock->sk);
	AA_BUG(!address);
	AA_BUG(in_interrupt());

	/* PF_UNIX goes through unix_stream_connect && unix_may_send */
	if (sock->sk->sk_family == PF_UNIX)
		return 0;
	return aa_sk_perm(OP_CONNECT, AA_MAY_CONNECT, sock->sk);
}

static int apparmor_socket_listen(struct socket *sock, int backlog)
{
	AA_BUG(!sock);
	AA_BUG(!sock->sk);
	AA_BUG(in_interrupt());

	if (sock->sk->sk_family == PF_UNIX)
		return aa_unix_listen_perm(sock, backlog);
	return aa_sk_perm(OP_LISTEN, AA_MAY_LISTEN, sock->sk);
}

/*
 * Note: while @newsock is created and has some information, the accept
 *       has not been done.
 */
static int apparmor_socket_accept(struct socket *sock, struct socket *newsock)
{
	AA_BUG(!sock);
	AA_BUG(!sock->sk);
	AA_BUG(!newsock);
	AA_BUG(in_interrupt());

	if (sock->sk->sk_family == PF_UNIX)
		return aa_unix_accept_perm(sock, newsock);
	return aa_sk_perm(OP_ACCEPT, AA_MAY_ACCEPT, sock->sk);
}

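/* sendmsg/recvmsg revalidation; af_unix traffic is handled by the unix hooks */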
static int aa_sock_msg_perm(const char *op, u32 request, struct socket *sock,
			    struct msghdr *msg, int size)
{
	AA_BUG(!sock);
	AA_BUG(!sock->sk);
	AA_BUG(!msg);
	AA_BUG(in_interrupt());

	/* PF_UNIX goes through unix_may_send */
	if (sock->sk->sk_family == PF_UNIX)
		return 0;
	return aa_sk_perm(op, request, sock->sk);
}

static int apparmor_socket_sendmsg(struct socket *sock,
				   struct msghdr *msg, int size)
{
	return aa_sock_msg_perm(OP_SENDMSG, AA_MAY_SEND, sock, msg, size);
}

static int apparmor_socket_recvmsg(struct socket *sock,
				   struct msghdr *msg, int size, int flags)
{
	return aa_sock_msg_perm(OP_RECVMSG, AA_MAY_RECEIVE, sock, msg, size);
}

/* revalidation, get/set attr, shutdown */
static int aa_sock_perm(const char *op, u32 request, struct socket *sock)
{
	AA_BUG(!sock);
	AA_BUG(!sock->sk);
	AA_BUG(in_interrupt());

	if (sock->sk->sk_family == PF_UNIX)
		return aa_unix_sock_perm(op, request, sock);
	return aa_sk_perm(op, request, sock->sk);
}

static int apparmor_socket_getsockname(struct socket *sock)
{
	return aa_sock_perm(OP_GETSOCKNAME, AA_MAY_GETATTR, sock);
}

static int apparmor_socket_getpeername(struct socket *sock)
{
	return aa_sock_perm(OP_GETPEERNAME, AA_MAY_GETATTR, sock);
}

/* revalidation, get/set attr, opt */
static int aa_sock_opt_perm(const char *op, u32 request, struct socket *sock,
			    int level, int optname)
{
	AA_BUG(!sock);
	AA_BUG(!sock->sk);
	AA_BUG(in_interrupt());

	if (sock->sk->sk_family == PF_UNIX)
		return aa_unix_opt_perm(op, request, sock, level, optname);
	return aa_sk_perm(op, request, sock->sk);
}

static int apparmor_socket_getsockopt(struct socket *sock, int level,
				      int optname)
{
	return aa_sock_opt_perm(OP_GETSOCKOPT, AA_MAY_GETOPT, sock,
				level, optname);
}

static int apparmor_socket_setsockopt(struct socket *sock, int level,
				      int optname)
{
	return aa_sock_opt_perm(OP_SETSOCKOPT, AA_MAY_SETOPT, sock,
				level, optname);
}

static int apparmor_socket_shutdown(struct socket *sock, int how)
{
	return aa_sock_perm(OP_SHUTDOWN, AA_MAY_SHUTDOWN, sock);
}

#ifdef CONFIG_NETWORK_SECMARK
/**
 * apparmor_socket_sock_rcv_skb - check perms before associating skb to sk
 * @sk: sk to associate @skb with
 * @skb: skb to check for perms
 *
 * Note: cannot sleep, may be called with locks held
 *
 * don't want protocol specific in __skb_recv_datagram()
 * to deny an incoming connection  socket_sock_rcv_skb()
 */
static int apparmor_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	struct aa_sk_ctx *ctx = aa_sock(sk);
	int error;

	if (!skb->secmark)
		return 0;

	/*
	 * If we reach here before the socket_post_create hook is called,
	 * in which case the label is null, drop the packet.
	 */
	if (!rcu_access_pointer(ctx->label))
		return -EACCES;

	rcu_read_lock();
	error = apparmor_secmark_check(rcu_dereference(ctx->label), OP_RECVMSG,
				       AA_MAY_RECEIVE, skb->secmark, sk);
	rcu_read_unlock();

	return error;
}
#endif


static struct aa_label *sk_peer_get_label(struct sock *sk)
{
	struct aa_sk_ctx *ctx = aa_sock(sk);
	struct aa_label *label = ERR_PTR(-ENOPROTOOPT);

	if (rcu_access_pointer(ctx->peer))
		return aa_get_label_rcu(&ctx->peer);

	if (sk->sk_family != PF_UNIX)
		return ERR_PTR(-ENOPROTOOPT);

	return label;
}

/**
 * apparmor_socket_getpeersec_stream - get security context of peer
 * @sock: socket that we are trying to get the peer context of
 * @optval: output - buffer to copy peer name to
 * @optlen: output - size of copied name in @optval
 * @len: size of @optval buffer
 * Returns: 0 on success, -errno on failure
 *
 * Note: for tcp only valid if using ipsec or cipso on lan
 */
static int apparmor_socket_getpeersec_stream(struct socket *sock,
					     sockptr_t optval, sockptr_t optlen,
					     unsigned int len)
{
	char *name = NULL;
	int slen, error = 0;
	struct aa_label *label;
	struct aa_label *peer;

	peer = sk_peer_get_label(sock->sk);
	if (IS_ERR(peer)) {
		error = PTR_ERR(peer);
		goto done;
	}
	label = begin_current_label_crit_section();
	slen = aa_label_asxprint(&name, labels_ns(label), peer,
				 FLAG_SHOW_MODE | FLAG_VIEW_SUBNS |
				 FLAG_HIDDEN_UNCONFINED, GFP_KERNEL);
	/* don't include terminating \0 in slen, it breaks some apps */
	if (slen < 0) {
		error = -ENOMEM;
		goto done_put;
	}
	if (slen > len) {
		error = -ERANGE;
		goto done_len;
	}

	if (copy_to_sockptr(optval, name, slen))
		error = -EFAULT;
done_len:
	if (copy_to_sockptr(optlen, &slen, sizeof(slen)))
		error = -EFAULT;

done_put:
	end_current_label_crit_section(label);
	aa_put_label(peer);
done:
	kfree(name);
	return error;
}

/**
 * apparmor_socket_getpeersec_dgram - get security label of packet
 * @sock: the peer socket
 * @skb: packet data
 * @secid: pointer to where to put the secid of the packet
 *
 * Sets the netlabel socket state on sk from parent
 */
static int apparmor_socket_getpeersec_dgram(struct socket *sock,
					    struct sk_buff *skb, u32 *secid)

{
	/* TODO: requires secid support */
	return -ENOPROTOOPT;
}

/**
 * apparmor_sock_graft - Initialize newly created socket
 * @sk: child sock
 * @parent: parent socket
 *
 * Note: could set off of SOCK_CTX(parent) but need to track inode and we can
 *       just set sk security information off of current creating process label
 *       Labeling of sk for accept case - probably should be sock based
 *       instead of task, because of the case where an implicitly labeled
 *       socket is shared by different tasks.
 */
static void apparmor_sock_graft(struct sock *sk, struct socket *parent)
{
	struct aa_sk_ctx *ctx = aa_sock(sk);

	/* setup - not live */
	if (!rcu_access_pointer(ctx->label))
		rcu_assign_pointer(ctx->label, aa_get_current_label());
}

#ifdef CONFIG_NETWORK_SECMARK
static int apparmor_inet_conn_request(const struct sock *sk, struct sk_buff *skb,
				      struct request_sock *req)
{
	struct aa_sk_ctx *ctx = aa_sock(sk);
	int error;

	if (!skb->secmark)
		return 0;

	rcu_read_lock();
	error = apparmor_secmark_check(rcu_dereference(ctx->label), OP_CONNECT,
				       AA_MAY_CONNECT, skb->secmark, sk);
	rcu_read_unlock();

	return error;
}
#endif

/*
 * The cred blob is a pointer to, not an instance of, an aa_label.
 */
struct lsm_blob_sizes apparmor_blob_sizes __ro_after_init = {
	.lbs_cred = sizeof(struct aa_label *),
	.lbs_file = sizeof(struct aa_file_ctx),
	.lbs_task = sizeof(struct aa_task_ctx),
	.lbs_sock = sizeof(struct aa_sk_ctx),
};

static const struct lsm_id apparmor_lsmid = {
	.name = "apparmor",
	.id = LSM_ID_APPARMOR,
};

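/*
 * Hook registration table: entries are grouped by subsystem (ptrace and
 * capabilities, mount, path, file, proc attrs, sockets, creds, tasks,
 * audit, secctx, io_uring).
 */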
static struct security_hook_list apparmor_hooks[] __ro_after_init = {
	LSM_HOOK_INIT(ptrace_access_check, apparmor_ptrace_access_check),
	LSM_HOOK_INIT(ptrace_traceme, apparmor_ptrace_traceme),
	LSM_HOOK_INIT(capget, apparmor_capget),
	LSM_HOOK_INIT(capable, apparmor_capable),

	LSM_HOOK_INIT(move_mount, apparmor_move_mount),
	LSM_HOOK_INIT(sb_mount, apparmor_sb_mount),
	LSM_HOOK_INIT(sb_umount, apparmor_sb_umount),
	LSM_HOOK_INIT(sb_pivotroot, apparmor_sb_pivotroot),

	LSM_HOOK_INIT(path_link, apparmor_path_link),
	LSM_HOOK_INIT(path_unlink, apparmor_path_unlink),
	LSM_HOOK_INIT(path_symlink, apparmor_path_symlink),
	LSM_HOOK_INIT(path_mkdir, apparmor_path_mkdir),
	LSM_HOOK_INIT(path_rmdir, apparmor_path_rmdir),
	LSM_HOOK_INIT(path_mknod, apparmor_path_mknod),
	LSM_HOOK_INIT(path_rename, apparmor_path_rename),
	LSM_HOOK_INIT(path_chmod, apparmor_path_chmod),
	LSM_HOOK_INIT(path_chown, apparmor_path_chown),
	LSM_HOOK_INIT(path_truncate, apparmor_path_truncate),
	LSM_HOOK_INIT(inode_getattr, apparmor_inode_getattr),

	LSM_HOOK_INIT(file_open, apparmor_file_open),
	LSM_HOOK_INIT(file_receive, apparmor_file_receive),
	LSM_HOOK_INIT(file_permission, apparmor_file_permission),
	LSM_HOOK_INIT(file_alloc_security, apparmor_file_alloc_security),
	LSM_HOOK_INIT(file_free_security, apparmor_file_free_security),
	LSM_HOOK_INIT(mmap_file, apparmor_mmap_file),
	LSM_HOOK_INIT(file_mprotect, apparmor_file_mprotect),
	LSM_HOOK_INIT(file_lock, apparmor_file_lock),
	LSM_HOOK_INIT(file_truncate, apparmor_file_truncate),

	LSM_HOOK_INIT(getselfattr, apparmor_getselfattr),
	LSM_HOOK_INIT(setselfattr, apparmor_setselfattr),
	LSM_HOOK_INIT(getprocattr, apparmor_getprocattr),
	LSM_HOOK_INIT(setprocattr, apparmor_setprocattr),

	LSM_HOOK_INIT(sk_alloc_security, apparmor_sk_alloc_security),
	LSM_HOOK_INIT(sk_free_security, apparmor_sk_free_security),
	LSM_HOOK_INIT(sk_clone_security, apparmor_sk_clone_security),

	LSM_HOOK_INIT(unix_stream_connect, apparmor_unix_stream_connect),
	LSM_HOOK_INIT(unix_may_send, apparmor_unix_may_send),

	LSM_HOOK_INIT(socket_create, apparmor_socket_create),
	LSM_HOOK_INIT(socket_post_create, apparmor_socket_post_create),
	LSM_HOOK_INIT(socket_socketpair, apparmor_socket_socketpair),
	LSM_HOOK_INIT(socket_bind, apparmor_socket_bind),
	LSM_HOOK_INIT(socket_connect, apparmor_socket_connect),
	LSM_HOOK_INIT(socket_listen, apparmor_socket_listen),
	LSM_HOOK_INIT(socket_accept, apparmor_socket_accept),
	LSM_HOOK_INIT(socket_sendmsg, apparmor_socket_sendmsg),
	LSM_HOOK_INIT(socket_recvmsg, apparmor_socket_recvmsg),
	LSM_HOOK_INIT(socket_getsockname, apparmor_socket_getsockname),
	LSM_HOOK_INIT(socket_getpeername, apparmor_socket_getpeername),
	LSM_HOOK_INIT(socket_getsockopt, apparmor_socket_getsockopt),
	LSM_HOOK_INIT(socket_setsockopt, apparmor_socket_setsockopt),
	LSM_HOOK_INIT(socket_shutdown, apparmor_socket_shutdown),
#ifdef CONFIG_NETWORK_SECMARK
	LSM_HOOK_INIT(socket_sock_rcv_skb, apparmor_socket_sock_rcv_skb),
#endif
	LSM_HOOK_INIT(socket_getpeersec_stream,
		      apparmor_socket_getpeersec_stream),
	LSM_HOOK_INIT(socket_getpeersec_dgram,
		      apparmor_socket_getpeersec_dgram),
	LSM_HOOK_INIT(sock_graft, apparmor_sock_graft),
#ifdef CONFIG_NETWORK_SECMARK
	LSM_HOOK_INIT(inet_conn_request, apparmor_inet_conn_request),
#endif

	LSM_HOOK_INIT(cred_alloc_blank, apparmor_cred_alloc_blank),
	LSM_HOOK_INIT(cred_free, apparmor_cred_free),
	LSM_HOOK_INIT(cred_prepare, apparmor_cred_prepare),
	LSM_HOOK_INIT(cred_transfer, apparmor_cred_transfer),

	LSM_HOOK_INIT(bprm_creds_for_exec, apparmor_bprm_creds_for_exec),
	LSM_HOOK_INIT(bprm_committing_creds, apparmor_bprm_committing_creds),
	LSM_HOOK_INIT(bprm_committed_creds, apparmor_bprm_committed_creds),

	LSM_HOOK_INIT(task_free, apparmor_task_free),
	LSM_HOOK_INIT(task_alloc, apparmor_task_alloc),
	LSM_HOOK_INIT(current_getlsmprop_subj,
		      apparmor_current_getlsmprop_subj),
	LSM_HOOK_INIT(task_getlsmprop_obj, apparmor_task_getlsmprop_obj),
	LSM_HOOK_INIT(task_setrlimit, apparmor_task_setrlimit),
	LSM_HOOK_INIT(task_kill, apparmor_task_kill),
	LSM_HOOK_INIT(userns_create, apparmor_userns_create),

#ifdef CONFIG_AUDIT
	LSM_HOOK_INIT(audit_rule_init, aa_audit_rule_init),
	LSM_HOOK_INIT(audit_rule_known, aa_audit_rule_known),
	LSM_HOOK_INIT(audit_rule_match, aa_audit_rule_match),
	LSM_HOOK_INIT(audit_rule_free, aa_audit_rule_free),
#endif

	LSM_HOOK_INIT(secid_to_secctx, apparmor_secid_to_secctx),
	LSM_HOOK_INIT(lsmprop_to_secctx, apparmor_lsmprop_to_secctx),
	LSM_HOOK_INIT(secctx_to_secid, apparmor_secctx_to_secid),
	LSM_HOOK_INIT(release_secctx, apparmor_release_secctx),

#ifdef CONFIG_IO_URING
	LSM_HOOK_INIT(uring_override_creds, apparmor_uring_override_creds),
	LSM_HOOK_INIT(uring_sqpoll, apparmor_uring_sqpoll),
#endif
};

/*
 * AppArmor sysfs module parameters
 */

static int param_set_aabool(const char *val, const struct kernel_param *kp);
static int param_get_aabool(char *buffer, const struct kernel_param *kp);
#define param_check_aabool param_check_bool
static const struct kernel_param_ops param_ops_aabool = {
	.flags = KERNEL_PARAM_OPS_FL_NOARG,
	.set = param_set_aabool,
	.get = param_get_aabool
};

static int param_set_aauint(const char *val, const struct kernel_param *kp);
static int param_get_aauint(char *buffer, const struct kernel_param *kp);
#define param_check_aauint param_check_uint
static const struct kernel_param_ops param_ops_aauint = {
	.set = param_set_aauint,
	.get = param_get_aauint
};

static int param_set_aacompressionlevel(const char *val,
					const struct kernel_param *kp);
static int param_get_aacompressionlevel(char *buffer,
					const struct kernel_param *kp);
#define param_check_aacompressionlevel param_check_int
static const struct kernel_param_ops param_ops_aacompressionlevel = {
	.set = param_set_aacompressionlevel,
	.get = param_get_aacompressionlevel
};

static int param_set_aalockpolicy(const char *val, const struct kernel_param *kp);
static int param_get_aalockpolicy(char *buffer, const struct kernel_param *kp);
#define param_check_aalockpolicy param_check_bool
static const struct kernel_param_ops param_ops_aalockpolicy = {
	.flags = KERNEL_PARAM_OPS_FL_NOARG,
	.set = param_set_aalockpolicy,
	.get = param_get_aalockpolicy
};

static int param_set_debug(const char *val, const struct kernel_param *kp);
static int param_get_debug(char *buffer, const struct kernel_param *kp);

static int param_set_audit(const char *val, const struct kernel_param *kp);
static int param_get_audit(char *buffer, const struct kernel_param *kp);

static int param_set_mode(const char *val, const struct kernel_param *kp);
static int param_get_mode(char *buffer, const struct kernel_param *kp);

/* Flag values, also controllable via /sys/module/apparmor/parameters
 * We define special types as we want to do additional mediation.
 */

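/*
 * Example (assuming the usual sysfs layout): the parameters declared
 * below appear under /sys/module/apparmor/parameters/, so e.g.
 *	echo 1 > /sys/module/apparmor/parameters/debug
 * adjusts a setting at runtime, subject to the checks in the
 * param_set_*()/param_get_*() handlers defined later in this file.
 */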
/* AppArmor global enforcement switch - complain, enforce, kill */
enum profile_mode aa_g_profile_mode = APPARMOR_ENFORCE;
module_param_call(mode, param_set_mode, param_get_mode,
		  &aa_g_profile_mode, S_IRUSR | S_IWUSR);

/* whether policy verification hashing is enabled */
bool aa_g_hash_policy = IS_ENABLED(CONFIG_SECURITY_APPARMOR_HASH_DEFAULT);
#ifdef CONFIG_SECURITY_APPARMOR_HASH
module_param_named(hash_policy, aa_g_hash_policy, aabool, S_IRUSR | S_IWUSR);
#endif

/* whether policy exactly as loaded is retained for debug and checkpointing */
bool aa_g_export_binary = IS_ENABLED(CONFIG_SECURITY_APPARMOR_EXPORT_BINARY);
#ifdef CONFIG_SECURITY_APPARMOR_EXPORT_BINARY
module_param_named(export_binary, aa_g_export_binary, aabool, 0600);
#endif

/* policy loaddata compression level */
int aa_g_rawdata_compression_level = AA_DEFAULT_CLEVEL;
module_param_named(rawdata_compression_level, aa_g_rawdata_compression_level,
		   aacompressionlevel, 0400);

/* Debug mode */
int aa_g_debug;
module_param_call(debug, param_set_debug, param_get_debug,
		  &aa_g_debug, 0600);

/* Audit mode */
enum audit_mode aa_g_audit;
module_param_call(audit, param_set_audit, param_get_audit,
		  &aa_g_audit, S_IRUSR | S_IWUSR);

/* Determines whether the audit header is included in audited messages.
 * This provides more context if the audit daemon is not running.
 */
bool aa_g_audit_header = true;
module_param_named(audit_header, aa_g_audit_header, aabool,
		   S_IRUSR | S_IWUSR);

/* lock out loading/removal of policy
 * TODO: add boot-time loading of policy, which would then be the only
 * way to load policy if lock_policy is set
 */
bool aa_g_lock_policy;
module_param_named(lock_policy, aa_g_lock_policy, aalockpolicy,
		   S_IRUSR | S_IWUSR);

/* Syscall logging mode */
bool aa_g_logsyscall;
module_param_named(logsyscall, aa_g_logsyscall, aabool, S_IRUSR | S_IWUSR);

/* Maximum pathname length before accesses will start getting rejected */
unsigned int aa_g_path_max = 2 * PATH_MAX;
module_param_named(path_max, aa_g_path_max, aauint, S_IRUSR);

/* Determines how paranoid loading of policy is and how much verification
 * of the loaded policy is done.
 * DEPRECATED: read-only, as strict checking of the load is always done now
 * that non-root users (user namespaces) can load policy.
 */
bool aa_g_paranoid_load = IS_ENABLED(CONFIG_SECURITY_APPARMOR_PARANOID_LOAD);
module_param_named(paranoid_load, aa_g_paranoid_load, aabool, S_IRUGO);

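/*
 * The parameters above surface under /sys/module/apparmor/parameters/,
 * e.g. (values and availability depend on the kernel config and on the
 * caller passing the checks in the param handlers below):
 *
 *	cat /sys/module/apparmor/parameters/mode         -> "enforce"
 *	echo -n complain > /sys/module/apparmor/parameters/mode
 */
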
static int param_get_aaintbool(char *buffer, const struct kernel_param *kp);
static int param_set_aaintbool(const char *val, const struct kernel_param *kp);
#define param_check_aaintbool param_check_int
static const struct kernel_param_ops param_ops_aaintbool = {
	.set = param_set_aaintbool,
	.get = param_get_aaintbool
};
/* Boot time disable flag */
static int apparmor_enabled __ro_after_init = 1;
module_param_named(enabled, apparmor_enabled, aaintbool, 0444);

static int __init apparmor_enabled_setup(char *str)
{
	unsigned long enabled;
	int error = kstrtoul(str, 0, &enabled);
	if (!error)
		apparmor_enabled = enabled ? 1 : 0;
	return 1;
}

__setup("apparmor=", apparmor_enabled_setup);

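/*
 * The __setup() hook above accepts the "apparmor=" boot parameter;
 * e.g. "apparmor=0" disables AppArmor and "apparmor=1" enables it, with
 * the same effect as setting the "enabled" module parameter on the
 * kernel command line.
 */
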
/* set global flag turning off the ability to load policy */
static int param_set_aalockpolicy(const char *val, const struct kernel_param *kp)
{
	if (!apparmor_enabled)
		return -EINVAL;
	if (apparmor_initialized && !aa_current_policy_admin_capable(NULL))
		return -EPERM;
	return param_set_bool(val, kp);
}

static int param_get_aalockpolicy(char *buffer, const struct kernel_param *kp)
{
	if (!apparmor_enabled)
		return -EINVAL;
	if (apparmor_initialized && !aa_current_policy_view_capable(NULL))
		return -EPERM;
	return param_get_bool(buffer, kp);
}

static int param_set_aabool(const char *val, const struct kernel_param *kp)
{
	if (!apparmor_enabled)
		return -EINVAL;
	if (apparmor_initialized && !aa_current_policy_admin_capable(NULL))
		return -EPERM;
	return param_set_bool(val, kp);
}

static int param_get_aabool(char *buffer, const struct kernel_param *kp)
{
	if (!apparmor_enabled)
		return -EINVAL;
	if (apparmor_initialized && !aa_current_policy_view_capable(NULL))
		return -EPERM;
	return param_get_bool(buffer, kp);
}

static int param_set_aauint(const char *val, const struct kernel_param *kp)
{
	int error;

	if (!apparmor_enabled)
		return -EINVAL;
	/* the sysfs file is read-only, but enforce a second check here */
	if (apparmor_initialized)
		return -EPERM;

	error = param_set_uint(val, kp);
	aa_g_path_max = max_t(uint32_t, aa_g_path_max, sizeof(union aa_buffer));
	pr_info("AppArmor: buffer size set to %d bytes\n", aa_g_path_max);

	return error;
}

static int param_get_aauint(char *buffer, const struct kernel_param *kp)
{
	if (!apparmor_enabled)
		return -EINVAL;
	if (apparmor_initialized && !aa_current_policy_view_capable(NULL))
		return -EPERM;
	return param_get_uint(buffer, kp);
}

/* Can only be set before AppArmor is initialized (i.e. on boot cmdline). */
static int param_set_aaintbool(const char *val, const struct kernel_param *kp)
{
	struct kernel_param kp_local;
	bool value;
	int error;

	if (apparmor_initialized)
		return -EPERM;

	/* Create local copy, with arg pointing to bool type. */
	value = !!*((int *)kp->arg);
	memcpy(&kp_local, kp, sizeof(kp_local));
	kp_local.arg = &value;

	error = param_set_bool(val, &kp_local);
	if (!error)
		*((int *)kp->arg) = *((bool *)kp_local.arg);
	return error;
}

/*
 * To avoid changing /sys/module/apparmor/parameters/enabled from Y/N to
 * 1/0, this converts the "int that is actually bool" back to bool for
 * display in the /sys filesystem, while keeping it "int" for the LSM
 * infrastructure.
 */
static int param_get_aaintbool(char *buffer, const struct kernel_param *kp)
{
	struct kernel_param kp_local;
	bool value;

	/* Create local copy, with arg pointing to bool type. */
	value = !!*((int *)kp->arg);
	memcpy(&kp_local, kp, sizeof(kp_local));
	kp_local.arg = &value;

	return param_get_bool(buffer, &kp_local);
}

static int param_set_aacompressionlevel(const char *val,
					const struct kernel_param *kp)
{
	int error;

	if (!apparmor_enabled)
		return -EINVAL;
	if (apparmor_initialized)
		return -EPERM;

	error = param_set_int(val, kp);

	aa_g_rawdata_compression_level = clamp(aa_g_rawdata_compression_level,
					       AA_MIN_CLEVEL, AA_MAX_CLEVEL);
	pr_info("AppArmor: policy rawdata compression level set to %d\n",
		aa_g_rawdata_compression_level);

	return error;
}

static int param_get_aacompressionlevel(char *buffer,
					const struct kernel_param *kp)
{
	if (!apparmor_enabled)
		return -EINVAL;
	if (apparmor_initialized && !aa_current_policy_view_capable(NULL))
		return -EPERM;
	return param_get_int(buffer, kp);
}

static int param_get_debug(char *buffer, const struct kernel_param *kp)
{
	if (!apparmor_enabled)
		return -EINVAL;
	if (apparmor_initialized && !aa_current_policy_view_capable(NULL))
		return -EPERM;
	return aa_print_debug_params(buffer);
}

static int param_set_debug(const char *val, const struct kernel_param *kp)
{
	int i;

	if (!apparmor_enabled)
		return -EINVAL;
	if (!val)
		return -EINVAL;
	if (apparmor_initialized && !aa_current_policy_admin_capable(NULL))
		return -EPERM;

	i = aa_parse_debug_params(val);
	if (i == DEBUG_PARSE_ERROR)
		return -EINVAL;

	aa_g_debug = i;
	return 0;
}

static int param_get_audit(char *buffer, const struct kernel_param *kp)
{
	if (!apparmor_enabled)
		return -EINVAL;
	if (apparmor_initialized && !aa_current_policy_view_capable(NULL))
		return -EPERM;
	return sprintf(buffer, "%s", audit_mode_names[aa_g_audit]);
}

static int param_set_audit(const char *val, const struct kernel_param *kp)
{
	int i;

	if (!apparmor_enabled)
		return -EINVAL;
	if (!val)
		return -EINVAL;
	if (apparmor_initialized && !aa_current_policy_admin_capable(NULL))
		return -EPERM;

	i = match_string(audit_mode_names, AUDIT_MAX_INDEX, val);
	if (i < 0)
		return -EINVAL;

	aa_g_audit = i;
	return 0;
}

static int param_get_mode(char *buffer, const struct kernel_param *kp)
{
	if (!apparmor_enabled)
		return -EINVAL;
	if (apparmor_initialized && !aa_current_policy_view_capable(NULL))
		return -EPERM;

	return sprintf(buffer, "%s", aa_profile_mode_names[aa_g_profile_mode]);
}

static int param_set_mode(const char *val, const struct kernel_param *kp)
{
	int i;

	if (!apparmor_enabled)
		return -EINVAL;
	if (!val)
		return -EINVAL;
	if (apparmor_initialized && !aa_current_policy_admin_capable(NULL))
		return -EPERM;

	i = match_string(aa_profile_mode_names, APPARMOR_MODE_NAMES_MAX_INDEX,
			 val);
	if (i < 0)
		return -EINVAL;

	aa_g_profile_mode = i;
	return 0;
}

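/**
 * aa_get_buffer - get a scratch buffer of aa_g_path_max bytes
 * @in_atomic: whether the caller is in atomic context and must not sleep
 *
 * Try the current CPU's local cache first, then the global reserve list,
 * and finally fall back to kmalloc().  Atomic callers may dip into the
 * reserve and grow it rather than sleep.  A failed allocation is retried
 * once against the global list before giving up.
 *
 * Returns: buffer to be released with aa_put_buffer(), or NULL on failure
 */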
char *aa_get_buffer(bool in_atomic)
{
	union aa_buffer *aa_buf;
	struct aa_local_cache *cache;
	bool try_again = true;
	gfp_t flags = (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);

	/* use per cpu cached buffers first */
	cache = get_cpu_ptr(&aa_local_buffers);
	if (!list_empty(&cache->head)) {
		aa_buf = list_first_entry(&cache->head, union aa_buffer, list);
		list_del(&aa_buf->list);
		cache->hold--;
		cache->count--;
		put_cpu_ptr(&aa_local_buffers);
		return &aa_buf->buffer[0];
	}
	put_cpu_ptr(&aa_local_buffers);

	if (!spin_trylock(&aa_buffers_lock)) {
		cache = get_cpu_ptr(&aa_local_buffers);
		cache->hold += 1;
		put_cpu_ptr(&aa_local_buffers);
		spin_lock(&aa_buffers_lock);
	} else {
		cache = get_cpu_ptr(&aa_local_buffers);
		put_cpu_ptr(&aa_local_buffers);
	}
retry:
	if (buffer_count > reserve_count ||
	    (in_atomic && !list_empty(&aa_global_buffers))) {
		aa_buf = list_first_entry(&aa_global_buffers, union aa_buffer,
					  list);
		list_del(&aa_buf->list);
		buffer_count--;
		spin_unlock(&aa_buffers_lock);
		return aa_buf->buffer;
	}
	if (in_atomic) {
		/*
		 * out of reserve buffers and in atomic context so increase
		 * how many buffers to keep in reserve
		 */
		reserve_count++;
		flags = GFP_ATOMIC;
	}
	spin_unlock(&aa_buffers_lock);

	if (!in_atomic)
		might_sleep();
	aa_buf = kmalloc(aa_g_path_max, flags);
	if (!aa_buf) {
		if (try_again) {
			try_again = false;
			spin_lock(&aa_buffers_lock);
			goto retry;
		}
		pr_warn_once("AppArmor: Failed to allocate a memory buffer.\n");
		return NULL;
	}
	return aa_buf->buffer;
}

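/**
 * aa_put_buffer - release a buffer obtained from aa_get_buffer()
 * @buf: buffer to release (may be NULL)
 *
 * If this CPU is not holding on to buffers, try to return the buffer to
 * the global reserve list; on lock contention (or while the CPU holds
 * buffers) cache it on the per-CPU list instead.
 */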
void aa_put_buffer(char *buf)
{
	union aa_buffer *aa_buf;
	struct aa_local_cache *cache;

	if (!buf)
		return;
	aa_buf = container_of(buf, union aa_buffer, buffer[0]);

	cache = get_cpu_ptr(&aa_local_buffers);
	if (!cache->hold) {
		put_cpu_ptr(&aa_local_buffers);

		if (spin_trylock(&aa_buffers_lock)) {
			/* put back on global list */
			list_add(&aa_buf->list, &aa_global_buffers);
			buffer_count++;
			spin_unlock(&aa_buffers_lock);
			cache = get_cpu_ptr(&aa_local_buffers);
			put_cpu_ptr(&aa_local_buffers);
			return;
		}
		/* contention on global list, fallback to percpu */
		cache = get_cpu_ptr(&aa_local_buffers);
		cache->hold += 1;
	}

	/* cache in percpu list */
	list_add(&aa_buf->list, &cache->head);
	cache->count++;
	put_cpu_ptr(&aa_local_buffers);
}

/*
 * AppArmor init functions
 */

/**
 * set_init_ctx - set a task context and profile on the first task.
 *
 * TODO: allow setting a profile other than unconfined
 */
static int __init set_init_ctx(void)
{
	struct cred *cred = (__force struct cred *)current->real_cred;

	set_cred_label(cred, aa_get_label(ns_unconfined(root_ns)));

	return 0;
}

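/* free every buffer currently held on the global reserve list */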
static void destroy_buffers(void)
{
	union aa_buffer *aa_buf;

	spin_lock(&aa_buffers_lock);
	while (!list_empty(&aa_global_buffers)) {
		aa_buf = list_first_entry(&aa_global_buffers, union aa_buffer,
					  list);
		list_del(&aa_buf->list);
		spin_unlock(&aa_buffers_lock);
		kfree(aa_buf);
		spin_lock(&aa_buffers_lock);
	}
	spin_unlock(&aa_buffers_lock);
}

static int __init alloc_buffers(void)
{
	union aa_buffer *aa_buf;
	int i, num;

	/*
	 * per cpu set of cached allocated buffers used to help reduce
	 * lock contention
	 */
	for_each_possible_cpu(i) {
		per_cpu(aa_local_buffers, i).hold = 0;
		per_cpu(aa_local_buffers, i).count = 0;
		INIT_LIST_HEAD(&per_cpu(aa_local_buffers, i).head);
	}
	/*
	 * A function may require two buffers at once. Usually the buffers are
	 * used for a short period of time and are shared. On a UP kernel two
	 * buffers should be enough; with more CPUs it is possible that more
	 * buffers will be used simultaneously. The preallocated pool may grow.
	 * This preallocation also has the side effect that AppArmor will be
	 * disabled early at boot if aa_g_path_max is extremely high.
	 */
	if (num_online_cpus() > 1)
		num = 4 + RESERVE_COUNT;
	else
		num = 2 + RESERVE_COUNT;

	for (i = 0; i < num; i++) {
		aa_buf = kmalloc(aa_g_path_max, GFP_KERNEL |
				 __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
		if (!aa_buf) {
			destroy_buffers();
			return -ENOMEM;
		}
		aa_put_buffer(aa_buf->buffer);
	}
	return 0;
}

#ifdef CONFIG_SYSCTL
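/*
 * proc handler for AppArmor's sysctls: require policy admin capability
 * and an enabled AppArmor before handing off to proc_dointvec().
 */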
static int apparmor_dointvec(const struct ctl_table *table, int write,
			     void *buffer, size_t *lenp, loff_t *ppos)
{
	if (!aa_current_policy_admin_capable(NULL))
		return -EPERM;
	if (!apparmor_enabled)
		return -EINVAL;

	return proc_dointvec(table, write, buffer, lenp, ppos);
}

static const struct ctl_table apparmor_sysctl_table[] = {
#ifdef CONFIG_USER_NS
	{
		.procname = "unprivileged_userns_apparmor_policy",
		.data = &unprivileged_userns_apparmor_policy,
		.maxlen = sizeof(int),
		.mode = 0600,
		.proc_handler = apparmor_dointvec,
	},
#endif /* CONFIG_USER_NS */
	{
		.procname = "apparmor_display_secid_mode",
		.data = &apparmor_display_secid_mode,
		.maxlen = sizeof(int),
		.mode = 0600,
		.proc_handler = apparmor_dointvec,
	},
	{
		.procname = "apparmor_restrict_unprivileged_unconfined",
		.data = &aa_unprivileged_unconfined_restricted,
		.maxlen = sizeof(int),
		.mode = 0600,
		.proc_handler = apparmor_dointvec,
	},
};

static int __init apparmor_init_sysctl(void)
{
	return register_sysctl("kernel", apparmor_sysctl_table) ? 0 : -ENOMEM;
}
#else
static inline int apparmor_init_sysctl(void)
{
	return 0;
}
#endif /* CONFIG_SYSCTL */

#if defined(CONFIG_NETFILTER) && defined(CONFIG_NETWORK_SECMARK)
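/*
 * POST_ROUTING hook: for packets carrying a secmark, check the sending
 * socket's label for AA_MAY_SEND against that secmark; unmarked traffic
 * and sockets that pass the check are accepted, anything else is dropped
 * with -ECONNREFUSED.
 */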
static unsigned int apparmor_ip_postroute(void *priv,
					  struct sk_buff *skb,
					  const struct nf_hook_state *state)
{
	struct aa_sk_ctx *ctx;
	struct sock *sk;
	int error;

	if (!skb->secmark)
		return NF_ACCEPT;

	sk = skb_to_full_sk(skb);
	if (sk == NULL)
		return NF_ACCEPT;

	ctx = aa_sock(sk);
	rcu_read_lock();
	error = apparmor_secmark_check(rcu_dereference(ctx->label), OP_SENDMSG,
				       AA_MAY_SEND, skb->secmark, sk);
	rcu_read_unlock();
	if (!error)
		return NF_ACCEPT;

	return NF_DROP_ERR(-ECONNREFUSED);
}

static const struct nf_hook_ops apparmor_nf_ops[] = {
	{
		.hook = apparmor_ip_postroute,
		.pf = NFPROTO_IPV4,
		.hooknum = NF_INET_POST_ROUTING,
		.priority = NF_IP_PRI_SELINUX_FIRST,
	},
#if IS_ENABLED(CONFIG_IPV6)
	{
		.hook = apparmor_ip_postroute,
		.pf = NFPROTO_IPV6,
		.hooknum = NF_INET_POST_ROUTING,
		.priority = NF_IP6_PRI_SELINUX_FIRST,
	},
#endif
};

static int __net_init apparmor_nf_register(struct net *net)
{
	return nf_register_net_hooks(net, apparmor_nf_ops,
				     ARRAY_SIZE(apparmor_nf_ops));
}

static void __net_exit apparmor_nf_unregister(struct net *net)
{
	nf_unregister_net_hooks(net, apparmor_nf_ops,
				ARRAY_SIZE(apparmor_nf_ops));
}

static struct pernet_operations apparmor_net_ops = {
	.init = apparmor_nf_register,
	.exit = apparmor_nf_unregister,
};

static int __init apparmor_nf_ip_init(void)
{
	int err;

	if (!apparmor_enabled)
		return 0;

	err = register_pernet_subsys(&apparmor_net_ops);
	if (err)
		panic("Apparmor: register_pernet_subsys: error %d\n", err);

	return 0;
}
__initcall(apparmor_nf_ip_init);
#endif

static char nulldfa_src[] __aligned(8) = {
	#include "nulldfa.in"
};
static struct aa_dfa *nulldfa;

static char stacksplitdfa_src[] __aligned(8) = {
	#include "stacksplitdfa.in"
};
struct aa_dfa *stacksplitdfa;
struct aa_policydb *nullpdb;

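/*
 * Unpack the build-time generated null and stack-split DFAs and wrap the
 * null DFA in a minimal two-entry policydb (nullpdb).  On failure every
 * partially set up object is released and an error is returned.
 */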
static int __init aa_setup_dfa_engine(void)
{
	int error = -ENOMEM;

	nullpdb = aa_alloc_pdb(GFP_KERNEL);
	if (!nullpdb)
		return -ENOMEM;

	nulldfa = aa_dfa_unpack(nulldfa_src, sizeof(nulldfa_src),
				TO_ACCEPT1_FLAG(YYTD_DATA32) |
				TO_ACCEPT2_FLAG(YYTD_DATA32));
	if (IS_ERR(nulldfa)) {
		error = PTR_ERR(nulldfa);
		goto fail;
	}
	nullpdb->dfa = aa_get_dfa(nulldfa);
	nullpdb->perms = kcalloc(2, sizeof(struct aa_perms), GFP_KERNEL);
	if (!nullpdb->perms)
		goto fail;
	nullpdb->size = 2;

	stacksplitdfa = aa_dfa_unpack(stacksplitdfa_src,
				      sizeof(stacksplitdfa_src),
				      TO_ACCEPT1_FLAG(YYTD_DATA32) |
				      TO_ACCEPT2_FLAG(YYTD_DATA32));
	if (IS_ERR(stacksplitdfa)) {
		error = PTR_ERR(stacksplitdfa);
		goto fail;
	}

	return 0;

fail:
	aa_put_pdb(nullpdb);
	aa_put_dfa(nulldfa);
	nullpdb = NULL;
	nulldfa = NULL;
	stacksplitdfa = NULL;

	return error;
}

static void __init aa_teardown_dfa_engine(void)
{
	aa_put_dfa(stacksplitdfa);
	aa_put_dfa(nulldfa);
	aa_put_pdb(nullpdb);
	nullpdb = NULL;
	stacksplitdfa = NULL;
	nulldfa = NULL;
}

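/*
 * Boot-time initialization: set up the DFA engine, the root policy
 * namespace, the sysctls and the work buffer pool, attach the unconfined
 * label to the init task, and only then register the LSM hooks.  Any
 * failure unwinds what was set up and clears apparmor_enabled.
 */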
static int __init apparmor_init(void)
{
	int error;

	error = aa_setup_dfa_engine();
	if (error) {
		AA_ERROR("Unable to setup dfa engine\n");
		goto alloc_out;
	}

	error = aa_alloc_root_ns();
	if (error) {
		AA_ERROR("Unable to allocate default profile namespace\n");
		goto alloc_out;
	}

	error = apparmor_init_sysctl();
	if (error) {
		AA_ERROR("Unable to register sysctls\n");
		goto alloc_out;
	}

	error = alloc_buffers();
	if (error) {
		AA_ERROR("Unable to allocate work buffers\n");
		goto alloc_out;
	}

	error = set_init_ctx();
	if (error) {
		AA_ERROR("Failed to set context on init task\n");
		aa_free_root_ns();
		goto buffers_out;
	}
	security_add_hooks(apparmor_hooks, ARRAY_SIZE(apparmor_hooks),
			   &apparmor_lsmid);

	/* Report that AppArmor successfully initialized */
	apparmor_initialized = 1;
	if (aa_g_profile_mode == APPARMOR_COMPLAIN)
		aa_info_message("AppArmor initialized: complain mode enabled");
	else if (aa_g_profile_mode == APPARMOR_KILL)
		aa_info_message("AppArmor initialized: kill mode enabled");
	else
		aa_info_message("AppArmor initialized");

	return error;

buffers_out:
	destroy_buffers();
alloc_out:
	aa_destroy_aafs();
	aa_teardown_dfa_engine();

	apparmor_enabled = false;
	return error;
}

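/*
 * Register with the LSM infrastructure.  The LEGACY_MAJOR/EXCLUSIVE flags
 * mark AppArmor as a major module that cannot be stacked with another
 * exclusive LSM, and the enabled flag is shared with the "enabled" module
 * parameter defined above.
 */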
DEFINE_LSM(apparmor) = {
	.name = "apparmor",
	.flags = LSM_FLAG_LEGACY_MAJOR | LSM_FLAG_EXCLUSIVE,
	.enabled = &apparmor_enabled,
	.blobs = &apparmor_blob_sizes,
	.init = apparmor_init,
};