GitHub Repository: awilliam/linux-vfio
Path: blob/master/fs/cifs/cifsacl.c
/*
 *   fs/cifs/cifsacl.c
 *
 *   Copyright (C) International Business Machines Corp., 2007,2008
 *   Author(s): Steve French ([email protected])
 *
 *   Contains the routines for mapping CIFS/NTFS ACLs
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/keyctl.h>
#include <linux/key-type.h>
#include <keys/user-type.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsacl.h"
#include "cifsproto.h"
#include "cifs_debug.h"

/* security id for everyone/world system group */
static const struct cifs_sid sid_everyone = {
        1, 1, {0, 0, 0, 0, 0, 1}, {0} };
/* security id for Authenticated Users system group */
static const struct cifs_sid sid_authusers = {
        1, 1, {0, 0, 0, 0, 0, 5}, {__constant_cpu_to_le32(11)} };
/* group users */
static const struct cifs_sid sid_user = {1, 2, {0, 0, 0, 0, 0, 5}, {} };

const struct cred *root_cred;

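/*
 * Prune SID-to-id cache entries from an idmap rb-tree.  Entries whose
 * refcount is zero and whose timestamp is older than SID_MAP_EXPIRE are
 * erased (counted in *nr_del); everything else is counted in *nr_rem.
 * With nr_to_scan == 0, or once the scan quota is reached, remaining
 * entries are only counted.
 */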
static void
shrink_idmap_tree(struct rb_root *root, int nr_to_scan, int *nr_rem,
                int *nr_del)
{
        struct rb_node *node;
        struct rb_node *tmp;
        struct cifs_sid_id *psidid;

        node = rb_first(root);
        while (node) {
                tmp = node;
                node = rb_next(tmp);
                psidid = rb_entry(tmp, struct cifs_sid_id, rbnode);
                if (nr_to_scan == 0 || *nr_del == nr_to_scan)
                        ++(*nr_rem);
                else {
                        if (time_after(jiffies, psidid->time + SID_MAP_EXPIRE)
                                                && psidid->refcount == 0) {
                                rb_erase(tmp, root);
                                ++(*nr_del);
                        } else
                                ++(*nr_rem);
                }
        }
}

/*
 * Run idmap cache shrinker.
 */
static int
cifs_idmap_shrinker(struct shrinker *shrink, struct shrink_control *sc)
{
        int nr_to_scan = sc->nr_to_scan;
        int nr_del = 0;
        int nr_rem = 0;
        struct rb_root *root;

        root = &uidtree;
        spin_lock(&siduidlock);
        shrink_idmap_tree(root, nr_to_scan, &nr_rem, &nr_del);
        spin_unlock(&siduidlock);

        root = &gidtree;
        spin_lock(&sidgidlock);
        shrink_idmap_tree(root, nr_to_scan, &nr_rem, &nr_del);
        spin_unlock(&sidgidlock);

        return nr_rem;
}

static struct shrinker cifs_shrinker = {
        .shrink = cifs_idmap_shrinker,
        .seeks = DEFAULT_SEEKS,
};

static int
cifs_idmap_key_instantiate(struct key *key, const void *data, size_t datalen)
{
        char *payload;

        payload = kmalloc(datalen, GFP_KERNEL);
        if (!payload)
                return -ENOMEM;

        memcpy(payload, data, datalen);
        key->payload.data = payload;
        return 0;
}

static inline void
cifs_idmap_key_destroy(struct key *key)
{
        kfree(key->payload.data);
}

struct key_type cifs_idmap_key_type = {
        .name = "cifs.idmap",
        .instantiate = cifs_idmap_key_instantiate,
        .destroy = cifs_idmap_key_destroy,
        .describe = user_describe,
        .match = user_match,
};

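/*
 * Render a SID as a string of the form "S-<revision>-<authority>-<subauth>..."
 * (only non-zero authority values are printed).  Prefixed with "os:" or "gs:"
 * by the caller, this string becomes the cifs.idmap key description passed
 * to request_key().
 */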
static void
sid_to_str(struct cifs_sid *sidptr, char *sidstr)
{
        int i;
        unsigned long saval;
        char *strptr;

        strptr = sidstr;

        sprintf(strptr, "%s", "S");
        strptr = sidstr + strlen(sidstr);

        sprintf(strptr, "-%d", sidptr->revision);
        strptr = sidstr + strlen(sidstr);

        for (i = 0; i < 6; ++i) {
                if (sidptr->authority[i]) {
                        sprintf(strptr, "-%d", sidptr->authority[i]);
                        strptr = sidstr + strlen(sidstr);
                }
        }

        for (i = 0; i < sidptr->num_subauth; ++i) {
                saval = le32_to_cpu(sidptr->sub_auth[i]);
                sprintf(strptr, "-%ld", saval);
                strptr = sidstr + strlen(sidstr);
        }
}

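/*
 * The uid and gid caches are rb-trees of struct cifs_sid_id keyed by
 * compare_sids().  id_rb_insert() links a caller-allocated node for a SID
 * not yet in the tree; id_rb_search() returns the node for a SID or NULL.
 * Callers serialize access with siduidlock/sidgidlock.
 */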
static void
id_rb_insert(struct rb_root *root, struct cifs_sid *sidptr,
                struct cifs_sid_id **psidid, char *typestr)
{
        int rc;
        char *strptr;
        struct rb_node *node = root->rb_node;
        struct rb_node *parent = NULL;
        struct rb_node **linkto = &(root->rb_node);
        struct cifs_sid_id *lsidid;

        while (node) {
                lsidid = rb_entry(node, struct cifs_sid_id, rbnode);
                parent = node;
                rc = compare_sids(sidptr, &((lsidid)->sid));
                if (rc > 0) {
                        linkto = &(node->rb_left);
                        node = node->rb_left;
                } else if (rc < 0) {
                        linkto = &(node->rb_right);
                        node = node->rb_right;
                }
        }

        memcpy(&(*psidid)->sid, sidptr, sizeof(struct cifs_sid));
        (*psidid)->time = jiffies - (SID_MAP_RETRY + 1);
        (*psidid)->refcount = 0;

        sprintf((*psidid)->sidstr, "%s", typestr);
        strptr = (*psidid)->sidstr + strlen((*psidid)->sidstr);
        sid_to_str(&(*psidid)->sid, strptr);

        clear_bit(SID_ID_PENDING, &(*psidid)->state);
        clear_bit(SID_ID_MAPPED, &(*psidid)->state);

        rb_link_node(&(*psidid)->rbnode, parent, linkto);
        rb_insert_color(&(*psidid)->rbnode, root);
}

static struct cifs_sid_id *
id_rb_search(struct rb_root *root, struct cifs_sid *sidptr)
{
        int rc;
        struct rb_node *node = root->rb_node;
        struct cifs_sid_id *lsidid;

        while (node) {
                lsidid = rb_entry(node, struct cifs_sid_id, rbnode);
                rc = compare_sids(sidptr, &((lsidid)->sid));
                if (rc > 0) {
                        node = node->rb_left;
                } else if (rc < 0) {
                        node = node->rb_right;
                } else /* node found */
                        return lsidid;
        }

        return NULL;
}

static int
sidid_pending_wait(void *unused)
{
        schedule();
        return signal_pending(current) ? -ERESTARTSYS : 0;
}

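/*
 * Map a SID to a POSIX uid or gid for fattr.  The SID is looked up in the
 * appropriate cache tree; on a miss a node is added and the cifs.idmap key
 * type is upcalled via request_key() (under root_cred) to resolve the
 * mapping.  If the upcall fails or a mapping is not yet available, the
 * mount's default uid/gid is used.  Only one task performs the upcall for
 * a given SID (SID_ID_PENDING); other tasks wait on that bit.
 */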
static int
sid_to_id(struct cifs_sb_info *cifs_sb, struct cifs_sid *psid,
                struct cifs_fattr *fattr, uint sidtype)
{
        int rc;
        unsigned long cid;
        struct key *idkey;
        const struct cred *saved_cred;
        struct cifs_sid_id *psidid, *npsidid;
        struct rb_root *cidtree;
        spinlock_t *cidlock;

        if (sidtype == SIDOWNER) {
                cid = cifs_sb->mnt_uid; /* default uid, in case upcall fails */
                cidlock = &siduidlock;
                cidtree = &uidtree;
        } else if (sidtype == SIDGROUP) {
                cid = cifs_sb->mnt_gid; /* default gid, in case upcall fails */
                cidlock = &sidgidlock;
                cidtree = &gidtree;
        } else
                return -ENOENT;

        spin_lock(cidlock);
        psidid = id_rb_search(cidtree, psid);

        if (!psidid) { /* node does not exist, allocate one & attempt adding */
                spin_unlock(cidlock);
                npsidid = kzalloc(sizeof(struct cifs_sid_id), GFP_KERNEL);
                if (!npsidid)
                        return -ENOMEM;

                npsidid->sidstr = kmalloc(SIDLEN, GFP_KERNEL);
                if (!npsidid->sidstr) {
                        kfree(npsidid);
                        return -ENOMEM;
                }

                spin_lock(cidlock);
                psidid = id_rb_search(cidtree, psid);
                if (psidid) { /* node happened to get inserted meanwhile */
                        ++psidid->refcount;
                        spin_unlock(cidlock);
                        kfree(npsidid->sidstr);
                        kfree(npsidid);
                } else {
                        psidid = npsidid;
                        id_rb_insert(cidtree, psid, &psidid,
                                        sidtype == SIDOWNER ? "os:" : "gs:");
                        ++psidid->refcount;
                        spin_unlock(cidlock);
                }
        } else {
                ++psidid->refcount;
                spin_unlock(cidlock);
        }

        /*
         * If we are here, it is safe to access psidid and its fields
         * since a reference was taken earlier while holding the spinlock.
         * A reference on the node is put without holding the spinlock
         * and it is OK to do so in this case, shrinker will not erase
         * this node until all references are put and we do not access
         * any fields of the node after a reference is put.
         */
        if (test_bit(SID_ID_MAPPED, &psidid->state)) {
                cid = psidid->id;
                psidid->time = jiffies; /* update ts for accessing */
                goto sid_to_id_out;
        }

        if (time_after(psidid->time + SID_MAP_RETRY, jiffies))
                goto sid_to_id_out;

        if (!test_and_set_bit(SID_ID_PENDING, &psidid->state)) {
                saved_cred = override_creds(root_cred);
                idkey = request_key(&cifs_idmap_key_type, psidid->sidstr, "");
                if (IS_ERR(idkey))
                        cFYI(1, "%s: Can't map SID to an id", __func__);
                else {
                        cid = *(unsigned long *)idkey->payload.value;
                        psidid->id = cid;
                        set_bit(SID_ID_MAPPED, &psidid->state);
                        key_put(idkey);
                        kfree(psidid->sidstr);
                }
                revert_creds(saved_cred);
                psidid->time = jiffies; /* update ts for accessing */
                clear_bit(SID_ID_PENDING, &psidid->state);
                wake_up_bit(&psidid->state, SID_ID_PENDING);
        } else {
                rc = wait_on_bit(&psidid->state, SID_ID_PENDING,
                                sidid_pending_wait, TASK_INTERRUPTIBLE);
                if (rc) {
                        cFYI(1, "%s: sidid_pending_wait interrupted %d",
                                        __func__, rc);
                        --psidid->refcount; /* decremented without spinlock */
                        return rc;
                }
                if (test_bit(SID_ID_MAPPED, &psidid->state))
                        cid = psidid->id;
        }

sid_to_id_out:
        --psidid->refcount; /* decremented without spinlock */
        if (sidtype == SIDOWNER)
                fattr->cf_uid = cid;
        else
                fattr->cf_gid = cid;

        return 0;
}

int
init_cifs_idmap(void)
{
        struct cred *cred;
        struct key *keyring;
        int ret;

        cFYI(1, "Registering the %s key type\n", cifs_idmap_key_type.name);

        /* create an override credential set with a special thread keyring in
         * which requests are cached
         *
         * this is used to prevent malicious redirections from being installed
         * with add_key().
         */
        cred = prepare_kernel_cred(NULL);
        if (!cred)
                return -ENOMEM;

        keyring = key_alloc(&key_type_keyring, ".cifs_idmap", 0, 0, cred,
                            (KEY_POS_ALL & ~KEY_POS_SETATTR) |
                            KEY_USR_VIEW | KEY_USR_READ,
                            KEY_ALLOC_NOT_IN_QUOTA);
        if (IS_ERR(keyring)) {
                ret = PTR_ERR(keyring);
                goto failed_put_cred;
        }

        ret = key_instantiate_and_link(keyring, NULL, 0, NULL, NULL);
        if (ret < 0)
                goto failed_put_key;

        ret = register_key_type(&cifs_idmap_key_type);
        if (ret < 0)
                goto failed_put_key;

        /* instruct request_key() to use this special keyring as a cache for
         * the results it looks up */
        cred->thread_keyring = keyring;
        cred->jit_keyring = KEY_REQKEY_DEFL_THREAD_KEYRING;
        root_cred = cred;

        spin_lock_init(&siduidlock);
        uidtree = RB_ROOT;
        spin_lock_init(&sidgidlock);
        gidtree = RB_ROOT;

        register_shrinker(&cifs_shrinker);

        cFYI(1, "cifs idmap keyring: %d\n", key_serial(keyring));
        return 0;

failed_put_key:
        key_put(keyring);
failed_put_cred:
        put_cred(cred);
        return ret;
}

void
exit_cifs_idmap(void)
{
        key_revoke(root_cred->thread_keyring);
        unregister_key_type(&cifs_idmap_key_type);
        put_cred(root_cred);
        unregister_shrinker(&cifs_shrinker);
        cFYI(1, "Unregistered %s key type\n", cifs_idmap_key_type.name);
}

void
cifs_destroy_idmaptrees(void)
{
        struct rb_root *root;
        struct rb_node *node;

        root = &uidtree;
        spin_lock(&siduidlock);
        while ((node = rb_first(root)))
                rb_erase(node, root);
        spin_unlock(&siduidlock);

        root = &gidtree;
        spin_lock(&sidgidlock);
        while ((node = rb_first(root)))
                rb_erase(node, root);
        spin_unlock(&sidgidlock);
}

/* Compare two SIDs (roughly equivalent to a UUID for a user or group).
   Returns 0 if they match; otherwise returns 1 or -1 to order them by
   revision, authority and sub-authority values (1 is also returned if
   either pointer is NULL). */
int compare_sids(const struct cifs_sid *ctsid, const struct cifs_sid *cwsid)
{
        int i;
        int num_subauth, num_sat, num_saw;

        if ((!ctsid) || (!cwsid))
                return 1;

        /* compare the revision */
        if (ctsid->revision != cwsid->revision) {
                if (ctsid->revision > cwsid->revision)
                        return 1;
                else
                        return -1;
        }

        /* compare all of the six auth values */
        for (i = 0; i < 6; ++i) {
                if (ctsid->authority[i] != cwsid->authority[i]) {
                        if (ctsid->authority[i] > cwsid->authority[i])
                                return 1;
                        else
                                return -1;
                }
        }

        /* compare all of the subauth values if any */
        num_sat = ctsid->num_subauth;
        num_saw = cwsid->num_subauth;
        num_subauth = num_sat < num_saw ? num_sat : num_saw;
        if (num_subauth) {
                for (i = 0; i < num_subauth; ++i) {
                        if (ctsid->sub_auth[i] != cwsid->sub_auth[i]) {
                                if (le32_to_cpu(ctsid->sub_auth[i]) >
                                        le32_to_cpu(cwsid->sub_auth[i]))
                                        return 1;
                                else
                                        return -1;
                        }
                }
        }

        return 0; /* sids compare/match */
}


/* copy ntsd, owner sid, and group sid from a security descriptor to another */
static void copy_sec_desc(const struct cifs_ntsd *pntsd,
                                struct cifs_ntsd *pnntsd, __u32 sidsoffset)
{
        int i;

        struct cifs_sid *owner_sid_ptr, *group_sid_ptr;
        struct cifs_sid *nowner_sid_ptr, *ngroup_sid_ptr;

        /* copy security descriptor control portion */
        pnntsd->revision = pntsd->revision;
        pnntsd->type = pntsd->type;
        pnntsd->dacloffset = cpu_to_le32(sizeof(struct cifs_ntsd));
        pnntsd->sacloffset = 0;
        pnntsd->osidoffset = cpu_to_le32(sidsoffset);
        pnntsd->gsidoffset = cpu_to_le32(sidsoffset + sizeof(struct cifs_sid));

        /* copy owner sid */
        owner_sid_ptr = (struct cifs_sid *)((char *)pntsd +
                                le32_to_cpu(pntsd->osidoffset));
        nowner_sid_ptr = (struct cifs_sid *)((char *)pnntsd + sidsoffset);

        nowner_sid_ptr->revision = owner_sid_ptr->revision;
        nowner_sid_ptr->num_subauth = owner_sid_ptr->num_subauth;
        for (i = 0; i < 6; i++)
                nowner_sid_ptr->authority[i] = owner_sid_ptr->authority[i];
        for (i = 0; i < 5; i++)
                nowner_sid_ptr->sub_auth[i] = owner_sid_ptr->sub_auth[i];

        /* copy group sid */
        group_sid_ptr = (struct cifs_sid *)((char *)pntsd +
                                le32_to_cpu(pntsd->gsidoffset));
        ngroup_sid_ptr = (struct cifs_sid *)((char *)pnntsd + sidsoffset +
                                        sizeof(struct cifs_sid));

        ngroup_sid_ptr->revision = group_sid_ptr->revision;
        ngroup_sid_ptr->num_subauth = group_sid_ptr->num_subauth;
        for (i = 0; i < 6; i++)
                ngroup_sid_ptr->authority[i] = group_sid_ptr->authority[i];
        for (i = 0; i < 5; i++)
                ngroup_sid_ptr->sub_auth[i] = group_sid_ptr->sub_auth[i];

        return;
}


/*
   Change posix mode to reflect permissions.
   pmode is the existing mode (we only want to overwrite part of this).
   bits to set can be: S_IRWXU, S_IRWXG or S_IRWXO i.e. 00700 or 00070 or 00007
*/
static void access_flags_to_mode(__le32 ace_flags, int type, umode_t *pmode,
                                 umode_t *pbits_to_set)
{
        __u32 flags = le32_to_cpu(ace_flags);
        /* the order of ACEs is important.  The canonical order is to begin with
           DENY entries followed by ALLOW, otherwise an allow entry could be
           encountered first, making the subsequent deny entry like "dead code"
           which would be superfluous since Windows stops when a match is made
           for the operation you are trying to perform for your user */

        /* For deny ACEs we change the mask so that subsequent allow access
           control entries do not turn on the bits we are denying */
        if (type == ACCESS_DENIED) {
                if (flags & GENERIC_ALL)
                        *pbits_to_set &= ~S_IRWXUGO;

                if ((flags & GENERIC_WRITE) ||
                        ((flags & FILE_WRITE_RIGHTS) == FILE_WRITE_RIGHTS))
                        *pbits_to_set &= ~S_IWUGO;
                if ((flags & GENERIC_READ) ||
                        ((flags & FILE_READ_RIGHTS) == FILE_READ_RIGHTS))
                        *pbits_to_set &= ~S_IRUGO;
                if ((flags & GENERIC_EXECUTE) ||
                        ((flags & FILE_EXEC_RIGHTS) == FILE_EXEC_RIGHTS))
                        *pbits_to_set &= ~S_IXUGO;
                return;
        } else if (type != ACCESS_ALLOWED) {
                cERROR(1, "unknown access control type %d", type);
                return;
        }
        /* else ACCESS_ALLOWED type */

        if (flags & GENERIC_ALL) {
                *pmode |= (S_IRWXUGO & (*pbits_to_set));
                cFYI(DBG2, "all perms");
                return;
        }
        if ((flags & GENERIC_WRITE) ||
                        ((flags & FILE_WRITE_RIGHTS) == FILE_WRITE_RIGHTS))
                *pmode |= (S_IWUGO & (*pbits_to_set));
        if ((flags & GENERIC_READ) ||
                        ((flags & FILE_READ_RIGHTS) == FILE_READ_RIGHTS))
                *pmode |= (S_IRUGO & (*pbits_to_set));
        if ((flags & GENERIC_EXECUTE) ||
                        ((flags & FILE_EXEC_RIGHTS) == FILE_EXEC_RIGHTS))
                *pmode |= (S_IXUGO & (*pbits_to_set));

        cFYI(DBG2, "access flags 0x%x mode now 0x%x", flags, *pmode);
        return;
}

/*
   Generate access flags to reflect permissions; mode is the existing mode.
   This function is called for every ACE in the DACL whose SID matches
   with either owner or group or everyone.
*/

static void mode_to_access_flags(umode_t mode, umode_t bits_to_use,
                                __u32 *pace_flags)
{
        /* reset access mask */
        *pace_flags = 0x0;

        /* bits to use are either S_IRWXU or S_IRWXG or S_IRWXO */
        mode &= bits_to_use;

        /* check for R/W/X UGO since we do not know whose flags
           these are, but we have cleared all the bits sans RWX for
           either user or group or other as per bits_to_use */
        if (mode & S_IRUGO)
                *pace_flags |= SET_FILE_READ_RIGHTS;
        if (mode & S_IWUGO)
                *pace_flags |= SET_FILE_WRITE_RIGHTS;
        if (mode & S_IXUGO)
                *pace_flags |= SET_FILE_EXEC_RIGHTS;

        cFYI(DBG2, "mode: 0x%x, access flags now 0x%x", mode, *pace_flags);
        return;
}

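/*
 * Fill in a single ACCESS_ALLOWED ACE for psid, converting the rwx bits of
 * nmode selected by "bits" into NT access rights.  Returns the on-the-wire
 * size of the ACE.
 */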
static __u16 fill_ace_for_sid(struct cifs_ace *pntace,
                        const struct cifs_sid *psid, __u64 nmode, umode_t bits)
{
        int i;
        __u16 size = 0;
        __u32 access_req = 0;

        pntace->type = ACCESS_ALLOWED;
        pntace->flags = 0x0;
        mode_to_access_flags(nmode, bits, &access_req);
        if (!access_req)
                access_req = SET_MINIMUM_RIGHTS;
        pntace->access_req = cpu_to_le32(access_req);

        pntace->sid.revision = psid->revision;
        pntace->sid.num_subauth = psid->num_subauth;
        for (i = 0; i < 6; i++)
                pntace->sid.authority[i] = psid->authority[i];
        for (i = 0; i < psid->num_subauth; i++)
                pntace->sid.sub_auth[i] = psid->sub_auth[i];

        size = 1 + 1 + 2 + 4 + 1 + 1 + 6 + (psid->num_subauth * 4);
        pntace->size = cpu_to_le16(size);

        return size;
}


#ifdef CONFIG_CIFS_DEBUG2
static void dump_ace(struct cifs_ace *pace, char *end_of_acl)
{
        int num_subauth;

        /* validate that we do not go past end of acl */

        if (le16_to_cpu(pace->size) < 16) {
                cERROR(1, "ACE too small %d", le16_to_cpu(pace->size));
                return;
        }

        if (end_of_acl < (char *)pace + le16_to_cpu(pace->size)) {
                cERROR(1, "ACL too small to parse ACE");
                return;
        }

        num_subauth = pace->sid.num_subauth;
        if (num_subauth) {
                int i;
                cFYI(1, "ACE revision %d num_auth %d type %d flags %d size %d",
                        pace->sid.revision, pace->sid.num_subauth, pace->type,
                        pace->flags, le16_to_cpu(pace->size));
                for (i = 0; i < num_subauth; ++i) {
                        cFYI(1, "ACE sub_auth[%d]: 0x%x", i,
                                le32_to_cpu(pace->sid.sub_auth[i]));
                }

                /* BB add length check to make sure that we do not have huge
                        num auths and therefore go off the end */
        }

        return;
}
#endif


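/*
 * Walk the ACEs of a DACL and fold them into POSIX permission bits in
 * fattr->cf_mode: ACEs whose SID matches the owner, group, everyone or
 * authenticated-users SID adjust the user, group or other rwx bits via
 * access_flags_to_mode().  No DACL at all means full permissions; a DACL
 * with zero ACEs means no permissions.
 */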
static void parse_dacl(struct cifs_acl *pdacl, char *end_of_acl,
                       struct cifs_sid *pownersid, struct cifs_sid *pgrpsid,
                       struct cifs_fattr *fattr)
{
        int i;
        int num_aces = 0;
        int acl_size;
        char *acl_base;
        struct cifs_ace **ppace;

        /* BB need to add parm so we can store the SID BB */

        if (!pdacl) {
                /* no DACL in the security descriptor, set
                   all the permissions for user/group/other */
                fattr->cf_mode |= S_IRWXUGO;
                return;
        }

        /* validate that we do not go past end of acl */
        if (end_of_acl < (char *)pdacl + le16_to_cpu(pdacl->size)) {
                cERROR(1, "ACL too small to parse DACL");
                return;
        }

        cFYI(DBG2, "DACL revision %d size %d num aces %d",
                le16_to_cpu(pdacl->revision), le16_to_cpu(pdacl->size),
                le32_to_cpu(pdacl->num_aces));

        /* reset rwx permissions for user/group/other.
           Also, if num_aces is 0 i.e. DACL has no ACEs,
           user/group/other have no permissions */
        fattr->cf_mode &= ~(S_IRWXUGO);

        acl_base = (char *)pdacl;
        acl_size = sizeof(struct cifs_acl);

        num_aces = le32_to_cpu(pdacl->num_aces);
        if (num_aces > 0) {
                umode_t user_mask = S_IRWXU;
                umode_t group_mask = S_IRWXG;
                umode_t other_mask = S_IRWXU | S_IRWXG | S_IRWXO;

                ppace = kmalloc(num_aces * sizeof(struct cifs_ace *),
                                GFP_KERNEL);
                if (!ppace) {
                        cERROR(1, "DACL memory allocation error");
                        return;
                }

                for (i = 0; i < num_aces; ++i) {
                        ppace[i] = (struct cifs_ace *) (acl_base + acl_size);
#ifdef CONFIG_CIFS_DEBUG2
                        dump_ace(ppace[i], end_of_acl);
#endif
                        if (compare_sids(&(ppace[i]->sid), pownersid) == 0)
                                access_flags_to_mode(ppace[i]->access_req,
                                                     ppace[i]->type,
                                                     &fattr->cf_mode,
                                                     &user_mask);
                        if (compare_sids(&(ppace[i]->sid), pgrpsid) == 0)
                                access_flags_to_mode(ppace[i]->access_req,
                                                     ppace[i]->type,
                                                     &fattr->cf_mode,
                                                     &group_mask);
                        if (compare_sids(&(ppace[i]->sid), &sid_everyone) == 0)
                                access_flags_to_mode(ppace[i]->access_req,
                                                     ppace[i]->type,
                                                     &fattr->cf_mode,
                                                     &other_mask);
                        if (compare_sids(&(ppace[i]->sid), &sid_authusers) == 0)
                                access_flags_to_mode(ppace[i]->access_req,
                                                     ppace[i]->type,
                                                     &fattr->cf_mode,
                                                     &other_mask);


/*                      memcpy((void *)(&(cifscred->aces[i])),
                                (void *)ppace[i],
                                sizeof(struct cifs_ace)); */

                        acl_base = (char *)ppace[i];
                        acl_size = le16_to_cpu(ppace[i]->size);
                }

                kfree(ppace);
        }

        return;
}


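/*
 * Build a three-ACE DACL (owner, group, everyone) immediately after the
 * cifs_acl header in pndacl, encoding nmode; this is the DACL used to
 * replace existing ACEs when permissions are changed via chmod.
 */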
static int set_chmod_dacl(struct cifs_acl *pndacl, struct cifs_sid *pownersid,
                        struct cifs_sid *pgrpsid, __u64 nmode)
{
        u16 size = 0;
        struct cifs_acl *pnndacl;

        pnndacl = (struct cifs_acl *)((char *)pndacl + sizeof(struct cifs_acl));

        size += fill_ace_for_sid((struct cifs_ace *) ((char *)pnndacl + size),
                                        pownersid, nmode, S_IRWXU);
        size += fill_ace_for_sid((struct cifs_ace *)((char *)pnndacl + size),
                                        pgrpsid, nmode, S_IRWXG);
        size += fill_ace_for_sid((struct cifs_ace *)((char *)pnndacl + size),
                                        &sid_everyone, nmode, S_IRWXO);

        pndacl->size = cpu_to_le16(size + sizeof(struct cifs_acl));
        pndacl->num_aces = cpu_to_le32(3);

        return 0;
}


static int parse_sid(struct cifs_sid *psid, char *end_of_acl)
{
        /* BB need to add parm so we can store the SID BB */

        /* validate that we do not go past end of ACL - sid must be at least 8
           bytes long (assuming no sub-auths - e.g. the null SID) */
        if (end_of_acl < (char *)psid + 8) {
                cERROR(1, "ACL too small to parse SID %p", psid);
                return -EINVAL;
        }

        if (psid->num_subauth) {
#ifdef CONFIG_CIFS_DEBUG2
                int i;
                cFYI(1, "SID revision %d num_auth %d",
                        psid->revision, psid->num_subauth);

                for (i = 0; i < psid->num_subauth; i++) {
                        cFYI(1, "SID sub_auth[%d]: 0x%x ", i,
                                le32_to_cpu(psid->sub_auth[i]));
                }

                /* BB add length check to make sure that we do not have huge
                        num auths and therefore go off the end */
                cFYI(1, "RID 0x%x",
                        le32_to_cpu(psid->sub_auth[psid->num_subauth-1]));
#endif
        }

        return 0;
}


/* Convert CIFS ACL to POSIX form */
static int parse_sec_desc(struct cifs_sb_info *cifs_sb,
                struct cifs_ntsd *pntsd, int acl_len, struct cifs_fattr *fattr)
{
        int rc = 0;
        struct cifs_sid *owner_sid_ptr, *group_sid_ptr;
        struct cifs_acl *dacl_ptr; /* no need for SACL ptr */
        char *end_of_acl = ((char *)pntsd) + acl_len;
        __u32 dacloffset;

        if (pntsd == NULL)
                return -EIO;

        owner_sid_ptr = (struct cifs_sid *)((char *)pntsd +
                                le32_to_cpu(pntsd->osidoffset));
        group_sid_ptr = (struct cifs_sid *)((char *)pntsd +
                                le32_to_cpu(pntsd->gsidoffset));
        dacloffset = le32_to_cpu(pntsd->dacloffset);
        dacl_ptr = (struct cifs_acl *)((char *)pntsd + dacloffset);
        cFYI(DBG2, "revision %d type 0x%x ooffset 0x%x goffset 0x%x "
                 "sacloffset 0x%x dacloffset 0x%x",
                 pntsd->revision, pntsd->type, le32_to_cpu(pntsd->osidoffset),
                 le32_to_cpu(pntsd->gsidoffset),
                 le32_to_cpu(pntsd->sacloffset), dacloffset);
/*      cifs_dump_mem("owner_sid: ", owner_sid_ptr, 64); */
        rc = parse_sid(owner_sid_ptr, end_of_acl);
        if (rc) {
                cFYI(1, "%s: Error %d parsing Owner SID", __func__, rc);
                return rc;
        }
        rc = sid_to_id(cifs_sb, owner_sid_ptr, fattr, SIDOWNER);
        if (rc) {
                cFYI(1, "%s: Error %d mapping Owner SID to uid", __func__, rc);
                return rc;
        }

        rc = parse_sid(group_sid_ptr, end_of_acl);
        if (rc) {
                cFYI(1, "%s: Error %d parsing Group SID", __func__, rc);
                return rc;
        }
        rc = sid_to_id(cifs_sb, group_sid_ptr, fattr, SIDGROUP);
        if (rc) {
                cFYI(1, "%s: Error %d mapping Group SID to gid", __func__, rc);
                return rc;
        }

        if (dacloffset)
                parse_dacl(dacl_ptr, end_of_acl, owner_sid_ptr,
                           group_sid_ptr, fattr);
        else
                cFYI(1, "no ACL"); /* BB grant all or default perms? */

/*      cifscred->uid = owner_sid_ptr->rid;
        cifscred->gid = group_sid_ptr->rid;
        memcpy((void *)(&(cifscred->osid)), (void *)owner_sid_ptr,
                        sizeof(struct cifs_sid));
        memcpy((void *)(&(cifscred->gsid)), (void *)group_sid_ptr,
                        sizeof(struct cifs_sid)); */

        return rc;
}


/* Convert permission bits from mode to equivalent CIFS ACL */
static int build_sec_desc(struct cifs_ntsd *pntsd, struct cifs_ntsd *pnntsd,
                                struct inode *inode, __u64 nmode)
{
        int rc = 0;
        __u32 dacloffset;
        __u32 ndacloffset;
        __u32 sidsoffset;
        struct cifs_sid *owner_sid_ptr, *group_sid_ptr;
        struct cifs_acl *dacl_ptr = NULL;  /* no need for SACL ptr */
        struct cifs_acl *ndacl_ptr = NULL; /* no need for SACL ptr */

        if ((inode == NULL) || (pntsd == NULL) || (pnntsd == NULL))
                return -EIO;

        owner_sid_ptr = (struct cifs_sid *)((char *)pntsd +
                                le32_to_cpu(pntsd->osidoffset));
        group_sid_ptr = (struct cifs_sid *)((char *)pntsd +
                                le32_to_cpu(pntsd->gsidoffset));

        dacloffset = le32_to_cpu(pntsd->dacloffset);
        dacl_ptr = (struct cifs_acl *)((char *)pntsd + dacloffset);

        ndacloffset = sizeof(struct cifs_ntsd);
        ndacl_ptr = (struct cifs_acl *)((char *)pnntsd + ndacloffset);
        ndacl_ptr->revision = dacl_ptr->revision;
        ndacl_ptr->size = 0;
        ndacl_ptr->num_aces = 0;

        rc = set_chmod_dacl(ndacl_ptr, owner_sid_ptr, group_sid_ptr, nmode);

        sidsoffset = ndacloffset + le16_to_cpu(ndacl_ptr->size);

        /* copy security descriptor control portion and owner and group sid */
        copy_sec_desc(pntsd, pnntsd, sidsoffset);

        return rc;
}

static struct cifs_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb,
                __u16 fid, u32 *pacllen)
{
        struct cifs_ntsd *pntsd = NULL;
        int xid, rc;
        struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);

        if (IS_ERR(tlink))
                return ERR_CAST(tlink);

        xid = GetXid();
        rc = CIFSSMBGetCIFSACL(xid, tlink_tcon(tlink), fid, &pntsd, pacllen);
        FreeXid(xid);

        cifs_put_tlink(tlink);

        cFYI(1, "%s: rc = %d ACL len %d", __func__, rc, *pacllen);
        if (rc)
                return ERR_PTR(rc);
        return pntsd;
}

static struct cifs_ntsd *get_cifs_acl_by_path(struct cifs_sb_info *cifs_sb,
                const char *path, u32 *pacllen)
{
        struct cifs_ntsd *pntsd = NULL;
        int oplock = 0;
        int xid, rc;
        __u16 fid;
        struct cifs_tcon *tcon;
        struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);

        if (IS_ERR(tlink))
                return ERR_CAST(tlink);

        tcon = tlink_tcon(tlink);
        xid = GetXid();

        rc = CIFSSMBOpen(xid, tcon, path, FILE_OPEN, READ_CONTROL, 0,
                         &fid, &oplock, NULL, cifs_sb->local_nls,
                         cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
        if (!rc) {
                rc = CIFSSMBGetCIFSACL(xid, tcon, fid, &pntsd, pacllen);
                CIFSSMBClose(xid, tcon, fid);
        }

        cifs_put_tlink(tlink);
        FreeXid(xid);

        cFYI(1, "%s: rc = %d ACL len %d", __func__, rc, *pacllen);
        if (rc)
                return ERR_PTR(rc);
        return pntsd;
}

/* Retrieve an ACL from the server */
struct cifs_ntsd *get_cifs_acl(struct cifs_sb_info *cifs_sb,
                                      struct inode *inode, const char *path,
                                      u32 *pacllen)
{
        struct cifs_ntsd *pntsd = NULL;
        struct cifsFileInfo *open_file = NULL;

        if (inode)
                open_file = find_readable_file(CIFS_I(inode), true);
        if (!open_file)
                return get_cifs_acl_by_path(cifs_sb, path, pacllen);

        pntsd = get_cifs_acl_by_fid(cifs_sb, open_file->netfid, pacllen);
        cifsFileInfo_put(open_file);
        return pntsd;
}

static int set_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb, __u16 fid,
                struct cifs_ntsd *pnntsd, u32 acllen)
{
        int xid, rc;
        struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);

        if (IS_ERR(tlink))
                return PTR_ERR(tlink);

        xid = GetXid();
        rc = CIFSSMBSetCIFSACL(xid, tlink_tcon(tlink), fid, pnntsd, acllen);
        FreeXid(xid);
        cifs_put_tlink(tlink);

        cFYI(DBG2, "SetCIFSACL rc = %d", rc);
        return rc;
}

static int set_cifs_acl_by_path(struct cifs_sb_info *cifs_sb, const char *path,
                struct cifs_ntsd *pnntsd, u32 acllen)
{
        int oplock = 0;
        int xid, rc;
        __u16 fid;
        struct cifs_tcon *tcon;
        struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);

        if (IS_ERR(tlink))
                return PTR_ERR(tlink);

        tcon = tlink_tcon(tlink);
        xid = GetXid();

        rc = CIFSSMBOpen(xid, tcon, path, FILE_OPEN, WRITE_DAC, 0,
                         &fid, &oplock, NULL, cifs_sb->local_nls,
                         cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
        if (rc) {
                cERROR(1, "Unable to open file to set ACL");
                goto out;
        }

        rc = CIFSSMBSetCIFSACL(xid, tcon, fid, pnntsd, acllen);
        cFYI(DBG2, "SetCIFSACL rc = %d", rc);

        CIFSSMBClose(xid, tcon, fid);
out:
        FreeXid(xid);
        cifs_put_tlink(tlink);
        return rc;
}

/* Set an ACL on the server */
int set_cifs_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
                        struct inode *inode, const char *path)
{
        struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
        struct cifsFileInfo *open_file;
        int rc;

        cFYI(DBG2, "set ACL for %s from mode 0x%x", path, inode->i_mode);

        open_file = find_readable_file(CIFS_I(inode), true);
        if (!open_file)
                return set_cifs_acl_by_path(cifs_sb, path, pnntsd, acllen);

        rc = set_cifs_acl_by_fid(cifs_sb, open_file->netfid, pnntsd, acllen);
        cifsFileInfo_put(open_file);
        return rc;
}

/* Translate the CIFS ACL (similar to NTFS ACL) for a file into mode bits */
int
cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr,
                  struct inode *inode, const char *path, const __u16 *pfid)
{
        struct cifs_ntsd *pntsd = NULL;
        u32 acllen = 0;
        int rc = 0;

        cFYI(DBG2, "converting ACL to mode for %s", path);

        if (pfid)
                pntsd = get_cifs_acl_by_fid(cifs_sb, *pfid, &acllen);
        else
                pntsd = get_cifs_acl(cifs_sb, inode, path, &acllen);

        /* if we can retrieve the ACL, now parse Access Control Entries, ACEs */
        if (IS_ERR(pntsd)) {
                rc = PTR_ERR(pntsd);
                cERROR(1, "%s: error %d getting sec desc", __func__, rc);
        } else {
                rc = parse_sec_desc(cifs_sb, pntsd, acllen, fattr);
                kfree(pntsd);
                if (rc)
                        cERROR(1, "parse sec desc failed rc = %d", rc);
        }

        return rc;
}

/* Convert mode bits to an ACL so we can update the ACL on the server */
int mode_to_cifs_acl(struct inode *inode, const char *path, __u64 nmode)
{
        int rc = 0;
        __u32 secdesclen = 0;
        struct cifs_ntsd *pntsd = NULL; /* acl obtained from server */
        struct cifs_ntsd *pnntsd = NULL; /* modified acl to be sent to server */

        cFYI(DBG2, "set ACL from mode for %s", path);

        /* Get the security descriptor */
        pntsd = get_cifs_acl(CIFS_SB(inode->i_sb), inode, path, &secdesclen);

        /* Add three ACEs for owner, group, everyone, getting rid of the other
           ACEs (as chmod disables ACEs), and set the security descriptor */

        if (IS_ERR(pntsd)) {
                rc = PTR_ERR(pntsd);
                cERROR(1, "%s: error %d getting sec desc", __func__, rc);
        } else {
                /* allocate memory for the smb header, the set security
                   descriptor request, the security descriptor parameters,
                   and the security descriptor itself */

                secdesclen = secdesclen < DEFSECDESCLEN ?
                                        DEFSECDESCLEN : secdesclen;
                pnntsd = kmalloc(secdesclen, GFP_KERNEL);
                if (!pnntsd) {
                        cERROR(1, "Unable to allocate security descriptor");
                        kfree(pntsd);
                        return -ENOMEM;
                }

                rc = build_sec_desc(pntsd, pnntsd, inode, nmode);

                cFYI(DBG2, "build_sec_desc rc: %d", rc);

                if (!rc) {
                        /* Set the security descriptor */
                        rc = set_cifs_acl(pnntsd, secdesclen, inode, path);
                        cFYI(DBG2, "set_cifs_acl rc: %d", rc);
                }

                kfree(pnntsd);
                kfree(pntsd);
        }

        return rc;
}