4 * Copyright (C) International Business Machines Corp., 2007,2008
5 * Author(s): Steve French (sfrench@us.ibm.com)
7 * Contains the routines for mapping CIFS/NTFS ACLs
9 * This library is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU Lesser General Public License as published
11 * by the Free Software Foundation; either version 2.1 of the License, or
12 * (at your option) any later version.
14 * This library is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
17 * the GNU Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public License
20 * along with this library; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 #include <linux/slab.h>
26 #include <linux/string.h>
27 #include <linux/keyctl.h>
28 #include <linux/key-type.h>
29 #include <keys/user-type.h>
33 #include "cifsproto.h"
34 #include "cifs_debug.h"
/* security id for everyone/world system group (S-1-1-0) */
static const struct cifs_sid sid_everyone = {
	1, 1, {0, 0, 0, 0, 0, 1}, {0} };
/* security id for Authenticated Users system group (S-1-5-11) */
static const struct cifs_sid sid_authusers = {
	1, 1, {0, 0, 0, 0, 0, 5}, {__constant_cpu_to_le32(11)} };
/* security id prefix for local users (NT authority, two sub-auths) */
static const struct cifs_sid sid_user = {1, 2 , {0, 0, 0, 0, 0, 5}, {} };
/* override credentials used when upcalling for SID<->id mapping keys */
const struct cred *root_cred;
/*
 * Walk one SID<->id rbtree and erase entries that have expired and are
 * no longer referenced.  nr_to_scan == 0 appears to mean "no limit".
 * NOTE(review): interior lines are elided in this view; the loop body
 * below is a fragment.
 */
shrink_idmap_tree(struct rb_root *root, int nr_to_scan, int *nr_rem,
	struct cifs_sid_id *psidid;

	node = rb_first(root);
		psidid = rb_entry(tmp, struct cifs_sid_id, rbnode);
		/* stop once the requested number of deletions is reached */
		if (nr_to_scan == 0 || *nr_del == nr_to_scan)
		/* only expired AND unreferenced entries may be erased */
		if (time_after(jiffies, psidid->time + SID_MAP_EXPIRE)
		    && psidid->refcount == 0) {
/*
 * Run idmap cache shrinker: trims all four SID<->id trees, each under
 * its own spinlock.
 * NOTE(review): `root` is presumably re-pointed at the matching tree on
 * elided lines before each shrink_idmap_tree() call — confirm against
 * the full source.
 */
cifs_idmap_shrinker(struct shrinker *shrink, struct shrink_control *sc)
	int nr_to_scan = sc->nr_to_scan;

	/* uid -> SID tree */
	spin_lock(&siduidlock);
	shrink_idmap_tree(root, nr_to_scan, &nr_rem, &nr_del);
	spin_unlock(&siduidlock);

	/* gid -> SID tree */
	spin_lock(&sidgidlock);
	shrink_idmap_tree(root, nr_to_scan, &nr_rem, &nr_del);
	spin_unlock(&sidgidlock);

	/* SID -> uid tree */
	spin_lock(&uidsidlock);
	shrink_idmap_tree(root, nr_to_scan, &nr_rem, &nr_del);
	spin_unlock(&uidsidlock);

	/* SID -> gid tree */
	spin_lock(&gidsidlock);
	shrink_idmap_tree(root, nr_to_scan, &nr_rem, &nr_del);
	spin_unlock(&gidsidlock);
/*
 * Insert a new id->SID mapping node keyed by numeric id (cid) into the
 * rbtree, initialising its state bits and building its key string
 * ("oi:<id>" / "gi:<id>" per the callers' typestr).  Caller holds the
 * tree's spinlock.  Ordering convention: larger ids go left (matches
 * sid_rb_search below).
 */
sid_rb_insert(struct rb_root *root, unsigned long cid,
		struct cifs_sid_id **psidid, char *typestr)
	struct rb_node *node = root->rb_node;
	struct rb_node *parent = NULL;
	struct rb_node **linkto = &(root->rb_node);
	struct cifs_sid_id *lsidid;

		lsidid = rb_entry(node, struct cifs_sid_id, rbnode);
		if (cid > lsidid->id) {
			linkto = &(node->rb_left);
			node = node->rb_left;
		if (cid < lsidid->id) {
			linkto = &(node->rb_right);
			node = node->rb_right;

	/* backdate timestamp so the first lookup immediately retries the upcall */
	(*psidid)->time = jiffies - (SID_MAP_RETRY + 1);
	(*psidid)->refcount = 0;

	/* key string is "<typestr><decimal id>", e.g. "oi:1000" */
	sprintf((*psidid)->sidstr, "%s", typestr);
	strptr = (*psidid)->sidstr + strlen((*psidid)->sidstr);
	sprintf(strptr, "%ld", cid);

	clear_bit(SID_ID_PENDING, &(*psidid)->state);
	clear_bit(SID_ID_MAPPED, &(*psidid)->state);

	rb_link_node(&(*psidid)->rbnode, parent, linkto);
	rb_insert_color(&(*psidid)->rbnode, root);
/*
 * Look up an id->SID mapping node by numeric id.  Uses the same
 * ordering convention as sid_rb_insert (larger ids to the left).
 * Caller holds the tree's spinlock.
 */
static struct cifs_sid_id *
sid_rb_search(struct rb_root *root, unsigned long cid)
	struct rb_node *node = root->rb_node;
	struct cifs_sid_id *lsidid;

		lsidid = rb_entry(node, struct cifs_sid_id, rbnode);
		if (cid > lsidid->id)
			node = node->rb_left;
		else if (cid < lsidid->id)
			node = node->rb_right;
		else /* node found */
/* shrinker registered with the MM layer to trim the idmap caches */
static struct shrinker cifs_shrinker = {
	.shrink = cifs_idmap_shrinker,
	.seeks = DEFAULT_SEEKS,
/*
 * key_type .instantiate handler: copy the upcall-supplied mapping data
 * into a kmalloc'd payload attached to the key.
 * NOTE(review): the kmalloc NULL check is presumably on an elided line
 * between the allocation and the memcpy — confirm.
 */
cifs_idmap_key_instantiate(struct key *key, struct key_preparsed_payload *prep)
	payload = kmalloc(prep->datalen, GFP_KERNEL);

	memcpy(payload, prep->data, prep->datalen);
	key->payload.data = payload;
	key->datalen = prep->datalen;
/* key_type .destroy handler: free the payload allocated at instantiation */
cifs_idmap_key_destroy(struct key *key)
	kfree(key->payload.data);
/* key type used for the SID<->id mapping upcalls (see request_key users) */
struct key_type cifs_idmap_key_type = {
	.name = "cifs.idmap",
	.instantiate = cifs_idmap_key_instantiate,
	.destroy = cifs_idmap_key_destroy,
	.describe = user_describe,
/*
 * Render a binary SID into its textual "S-<rev>-<auth>...-<subauth>..."
 * form in sidstr.  Caller must supply a buffer large enough (SIDLEN at
 * the call sites).  Zero authority bytes are skipped.
 */
sid_to_str(struct cifs_sid *sidptr, char *sidstr)
	sprintf(strptr, "%s", "S");
	strptr = sidstr + strlen(sidstr);

	sprintf(strptr, "-%d", sidptr->revision);
	strptr = sidstr + strlen(sidstr);

	/* six identifier-authority bytes; only non-zero ones are printed */
	for (i = 0; i < 6; ++i) {
		if (sidptr->authority[i]) {
			sprintf(strptr, "-%d", sidptr->authority[i]);
			strptr = sidstr + strlen(sidstr);

	/* sub-authorities are stored little-endian on the wire */
	for (i = 0; i < sidptr->num_subauth; ++i) {
		saval = le32_to_cpu(sidptr->sub_auth[i]);
		sprintf(strptr, "-%ld", saval);
		strptr = sidstr + strlen(sidstr);
/*
 * Copy a SID, clamping num_subauth to NUM_SUBAUTHS so a malformed or
 * hostile source SID cannot claim more sub-authorities than fit in the
 * fixed-size structure.
 */
cifs_copy_sid(struct cifs_sid *dst, const struct cifs_sid *src)
	memcpy(dst, src, sizeof(*dst));
	dst->num_subauth = min_t(u8, src->num_subauth, NUM_SUBAUTHS);
/*
 * Insert a new SID->id mapping node keyed by the binary SID into the
 * rbtree, initialising state bits and building its key string
 * ("os:<sid-string>" / "gs:<sid-string>" per the callers' typestr).
 * Caller holds the tree's spinlock.  Ordering comes from compare_sids().
 */
id_rb_insert(struct rb_root *root, struct cifs_sid *sidptr,
		struct cifs_sid_id **psidid, char *typestr)
	struct rb_node *node = root->rb_node;
	struct rb_node *parent = NULL;
	struct rb_node **linkto = &(root->rb_node);
	struct cifs_sid_id *lsidid;

		lsidid = rb_entry(node, struct cifs_sid_id, rbnode);
		rc = compare_sids(sidptr, &((lsidid)->sid));
			linkto = &(node->rb_left);
			node = node->rb_left;
			linkto = &(node->rb_right);
			node = node->rb_right;

	cifs_copy_sid(&(*psidid)->sid, sidptr);
	/* backdate timestamp so the first lookup immediately retries the upcall */
	(*psidid)->time = jiffies - (SID_MAP_RETRY + 1);
	(*psidid)->refcount = 0;

	/* key string is "<typestr><textual SID>" */
	sprintf((*psidid)->sidstr, "%s", typestr);
	strptr = (*psidid)->sidstr + strlen((*psidid)->sidstr);
	sid_to_str(&(*psidid)->sid, strptr);

	clear_bit(SID_ID_PENDING, &(*psidid)->state);
	clear_bit(SID_ID_MAPPED, &(*psidid)->state);

	rb_link_node(&(*psidid)->rbnode, parent, linkto);
	rb_insert_color(&(*psidid)->rbnode, root);
/*
 * Look up a SID->id mapping node by binary SID, ordered via
 * compare_sids() (mirrors id_rb_insert).  Caller holds the tree's
 * spinlock.
 */
static struct cifs_sid_id *
id_rb_search(struct rb_root *root, struct cifs_sid *sidptr)
	struct rb_node *node = root->rb_node;
	struct cifs_sid_id *lsidid;

		lsidid = rb_entry(node, struct cifs_sid_id, rbnode);
		rc = compare_sids(sidptr, &((lsidid)->sid));
			node = node->rb_left;
			node = node->rb_right;
		} else /* node found */
/*
 * wait_on_bit() action function used while another task resolves the
 * same mapping: interruptible, so a signal aborts the wait.
 */
sidid_pending_wait(void *unused)
	return signal_pending(current) ? -ERESTARTSYS : 0;
302 id_to_sid(unsigned long cid, uint sidtype, struct cifs_sid *ssid)
306 const struct cred *saved_cred;
307 struct cifs_sid *lsid;
308 struct cifs_sid_id *psidid, *npsidid;
309 struct rb_root *cidtree;
312 if (sidtype == SIDOWNER) {
313 cidlock = &siduidlock;
315 } else if (sidtype == SIDGROUP) {
316 cidlock = &sidgidlock;
322 psidid = sid_rb_search(cidtree, cid);
324 if (!psidid) { /* node does not exist, allocate one & attempt adding */
325 spin_unlock(cidlock);
326 npsidid = kzalloc(sizeof(struct cifs_sid_id), GFP_KERNEL);
330 npsidid->sidstr = kmalloc(SIDLEN, GFP_KERNEL);
331 if (!npsidid->sidstr) {
337 psidid = sid_rb_search(cidtree, cid);
338 if (psidid) { /* node happened to get inserted meanwhile */
340 spin_unlock(cidlock);
341 kfree(npsidid->sidstr);
345 sid_rb_insert(cidtree, cid, &psidid,
346 sidtype == SIDOWNER ? "oi:" : "gi:");
348 spin_unlock(cidlock);
352 spin_unlock(cidlock);
356 * If we are here, it is safe to access psidid and its fields
357 * since a reference was taken earlier while holding the spinlock.
358 * A reference on the node is put without holding the spinlock
359 * and it is OK to do so in this case, shrinker will not erase
360 * this node until all references are put and we do not access
361 * any fields of the node after a reference is put .
363 if (test_bit(SID_ID_MAPPED, &psidid->state)) {
364 cifs_copy_sid(ssid, &psidid->sid);
365 psidid->time = jiffies; /* update ts for accessing */
369 if (time_after(psidid->time + SID_MAP_RETRY, jiffies)) {
374 if (!test_and_set_bit(SID_ID_PENDING, &psidid->state)) {
375 saved_cred = override_creds(root_cred);
376 sidkey = request_key(&cifs_idmap_key_type, psidid->sidstr, "");
377 if (IS_ERR(sidkey)) {
379 cFYI(1, "%s: Can't map and id to a SID", __func__);
380 } else if (sidkey->datalen < sizeof(struct cifs_sid)) {
382 cFYI(1, "%s: Downcall contained malformed key "
383 "(datalen=%hu)", __func__, sidkey->datalen);
385 lsid = (struct cifs_sid *)sidkey->payload.data;
386 cifs_copy_sid(&psidid->sid, lsid);
387 cifs_copy_sid(ssid, &psidid->sid);
388 set_bit(SID_ID_MAPPED, &psidid->state);
390 kfree(psidid->sidstr);
392 psidid->time = jiffies; /* update ts for accessing */
393 revert_creds(saved_cred);
394 clear_bit(SID_ID_PENDING, &psidid->state);
395 wake_up_bit(&psidid->state, SID_ID_PENDING);
397 rc = wait_on_bit(&psidid->state, SID_ID_PENDING,
398 sidid_pending_wait, TASK_INTERRUPTIBLE);
400 cFYI(1, "%s: sidid_pending_wait interrupted %d",
405 if (test_bit(SID_ID_MAPPED, &psidid->state))
406 cifs_copy_sid(ssid, &psidid->sid);
/*
 * Map a SID to a numeric uid/gid and store it in fattr.  Mirror image
 * of id_to_sid(): rbtree cache first, then a request_key() upcall
 * (under root_cred) with PENDING-bit synchronisation.  On upcall
 * failure cid keeps the mount's default uid/gid.
 *
 * NOTE(review): several interior lines (error paths, refcount takes,
 * closing braces) are elided in this view.
 */
sid_to_id(struct cifs_sb_info *cifs_sb, struct cifs_sid *psid,
		struct cifs_fattr *fattr, uint sidtype)
	const struct cred *saved_cred;
	struct cifs_sid_id *psidid, *npsidid;
	struct rb_root *cidtree;

	/* pick defaults and the lock/tree pair matching the id type */
	if (sidtype == SIDOWNER) {
		cid = cifs_sb->mnt_uid; /* default uid, in case upcall fails */
		cidlock = &siduidlock;
	} else if (sidtype == SIDGROUP) {
		cid = cifs_sb->mnt_gid; /* default gid, in case upcall fails */
		cidlock = &sidgidlock;

	psidid = id_rb_search(cidtree, psid);

	if (!psidid) { /* node does not exist, allocate one & attempt adding */
		spin_unlock(cidlock);
		npsidid = kzalloc(sizeof(struct cifs_sid_id), GFP_KERNEL);

		npsidid->sidstr = kmalloc(SIDLEN, GFP_KERNEL);
		if (!npsidid->sidstr) {

		psidid = id_rb_search(cidtree, psid);
		if (psidid) { /* node happened to get inserted meanwhile */
			spin_unlock(cidlock);
			kfree(npsidid->sidstr);
			id_rb_insert(cidtree, psid, &psidid,
					sidtype == SIDOWNER ? "os:" : "gs:");
			spin_unlock(cidlock);
		spin_unlock(cidlock);

	/*
	 * If we are here, it is safe to access psidid and its fields
	 * since a reference was taken earlier while holding the spinlock.
	 * A reference on the node is put without holding the spinlock
	 * and it is OK to do so in this case, shrinker will not erase
	 * this node until all references are put and we do not access
	 * any fields of the node after a reference is put .
	 */
	if (test_bit(SID_ID_MAPPED, &psidid->state)) {
		/* cache hit */
		psidid->time = jiffies; /* update ts for accessing */

	/* recent failed upcall: back off instead of retrying immediately */
	if (time_after(psidid->time + SID_MAP_RETRY, jiffies))

	if (!test_and_set_bit(SID_ID_PENDING, &psidid->state)) {
		/* we won the race: do the upcall as the override creds */
		saved_cred = override_creds(root_cred);
		idkey = request_key(&cifs_idmap_key_type, psidid->sidstr, "");
			cFYI(1, "%s: Can't map SID to an id", __func__);
			cid = *(unsigned long *)idkey->payload.value;
			set_bit(SID_ID_MAPPED, &psidid->state);
			kfree(psidid->sidstr);
		revert_creds(saved_cred);
		psidid->time = jiffies; /* update ts for accessing */
		clear_bit(SID_ID_PENDING, &psidid->state);
		wake_up_bit(&psidid->state, SID_ID_PENDING);
		/* another task is resolving this SID: wait for it */
		rc = wait_on_bit(&psidid->state, SID_ID_PENDING,
				sidid_pending_wait, TASK_INTERRUPTIBLE);
			cFYI(1, "%s: sidid_pending_wait interrupted %d",
		--psidid->refcount; /* decremented without spinlock */

	if (test_bit(SID_ID_MAPPED, &psidid->state))

	--psidid->refcount; /* decremented without spinlock */
	/* store the resolved (or default) id into the fattr */
	if (sidtype == SIDOWNER)
/*
 * Module-init helper: register the cifs.idmap key type, build the
 * override credential set (root_cred) whose thread keyring caches
 * request_key() results, initialise the four idmap tree locks/roots,
 * and register the cache shrinker.
 */
init_cifs_idmap(void)
	cFYI(1, "Registering the %s key type", cifs_idmap_key_type.name);

	/* create an override credential set with a special thread keyring in
	 * which requests are cached
	 *
	 * this is used to prevent malicious redirections from being installed
	 */
	cred = prepare_kernel_cred(NULL);

	keyring = key_alloc(&key_type_keyring, ".cifs_idmap", 0, 0, cred,
			    (KEY_POS_ALL & ~KEY_POS_SETATTR) |
			    KEY_USR_VIEW | KEY_USR_READ,
			    KEY_ALLOC_NOT_IN_QUOTA);
	if (IS_ERR(keyring)) {
		ret = PTR_ERR(keyring);
		goto failed_put_cred;

	ret = key_instantiate_and_link(keyring, NULL, 0, NULL, NULL);

	ret = register_key_type(&cifs_idmap_key_type);

	/* instruct request_key() to use this special keyring as a cache for
	 * the results it looks up */
	set_bit(KEY_FLAG_ROOT_CAN_CLEAR, &keyring->flags);
	cred->thread_keyring = keyring;
	cred->jit_keyring = KEY_REQKEY_DEFL_THREAD_KEYRING;

	/* locks and roots for the four SID<->id caches (some tree
	   assignments are on elided lines in this view) */
	spin_lock_init(&siduidlock);
	spin_lock_init(&sidgidlock);
	spin_lock_init(&uidsidlock);
	siduidtree = RB_ROOT;
	spin_lock_init(&gidsidlock);
	sidgidtree = RB_ROOT;
	register_shrinker(&cifs_shrinker);

	cFYI(1, "cifs idmap keyring: %d", key_serial(keyring));
/*
 * Module-exit helper: undo init_cifs_idmap() — revoke the cache
 * keyring, unregister the key type and the shrinker.
 */
exit_cifs_idmap(void)
	key_revoke(root_cred->thread_keyring);
	unregister_key_type(&cifs_idmap_key_type);

	unregister_shrinker(&cifs_shrinker);
	cFYI(1, "Unregistered %s key type", cifs_idmap_key_type.name);
/*
 * Empty all four SID<->id rbtrees at teardown, each under its lock.
 * NOTE(review): `root` is presumably re-pointed at the matching tree on
 * elided lines before each loop — confirm against the full source.
 */
cifs_destroy_idmaptrees(void)
	struct rb_root *root;
	struct rb_node *node;

	spin_lock(&siduidlock);
	while ((node = rb_first(root)))
		rb_erase(node, root);
	spin_unlock(&siduidlock);

	spin_lock(&sidgidlock);
	while ((node = rb_first(root)))
		rb_erase(node, root);
	spin_unlock(&sidgidlock);

	spin_lock(&uidsidlock);
	while ((node = rb_first(root)))
		rb_erase(node, root);
	spin_unlock(&uidsidlock);

	spin_lock(&gidsidlock);
	while ((node = rb_first(root)))
		rb_erase(node, root);
	spin_unlock(&gidsidlock);
/* Compare two SIDs (roughly equivalent to a UUID for a user or group).
   Returns 0 when they match (memcmp-style); a non-zero ordering value
   otherwise.  (The function returns 0 on match — see the final return
   below — despite what an older comment here claimed.) */
int compare_sids(const struct cifs_sid *ctsid, const struct cifs_sid *cwsid)
	int num_subauth, num_sat, num_saw;

	if ((!ctsid) || (!cwsid))

	/* compare the revision */
	if (ctsid->revision != cwsid->revision) {
		if (ctsid->revision > cwsid->revision)

	/* compare all of the six auth values */
	for (i = 0; i < 6; ++i) {
		if (ctsid->authority[i] != cwsid->authority[i]) {
			if (ctsid->authority[i] > cwsid->authority[i])

	/* compare all of the subauth values if any */
	num_sat = ctsid->num_subauth;
	num_saw = cwsid->num_subauth;
	num_subauth = num_sat < num_saw ? num_sat : num_saw;

	/* inequality test is endian-safe; ordering needs le32_to_cpu */
	for (i = 0; i < num_subauth; ++i) {
		if (ctsid->sub_auth[i] != cwsid->sub_auth[i]) {
			if (le32_to_cpu(ctsid->sub_auth[i]) >
				le32_to_cpu(cwsid->sub_auth[i]))

	return 0; /* sids compare/match */
/* copy ntsd, owner sid, and group sid from a security descriptor to another.
   The new descriptor places its DACL right after the fixed header and
   both SIDs at sidsoffset (owner first, group immediately after). */
static void copy_sec_desc(const struct cifs_ntsd *pntsd,
				struct cifs_ntsd *pnntsd, __u32 sidsoffset)
	struct cifs_sid *owner_sid_ptr, *group_sid_ptr;
	struct cifs_sid *nowner_sid_ptr, *ngroup_sid_ptr;

	/* copy security descriptor control portion */
	pnntsd->revision = pntsd->revision;
	pnntsd->type = pntsd->type;
	pnntsd->dacloffset = cpu_to_le32(sizeof(struct cifs_ntsd));
	pnntsd->sacloffset = 0; /* no SACL in the rebuilt descriptor */
	pnntsd->osidoffset = cpu_to_le32(sidsoffset);
	pnntsd->gsidoffset = cpu_to_le32(sidsoffset + sizeof(struct cifs_sid));

	/* copy owner SID to its new offset */
	owner_sid_ptr = (struct cifs_sid *)((char *)pntsd +
				le32_to_cpu(pntsd->osidoffset));
	nowner_sid_ptr = (struct cifs_sid *)((char *)pnntsd + sidsoffset);
	cifs_copy_sid(nowner_sid_ptr, owner_sid_ptr);

	/* copy group SID immediately after the owner SID */
	group_sid_ptr = (struct cifs_sid *)((char *)pntsd +
				le32_to_cpu(pntsd->gsidoffset));
	ngroup_sid_ptr = (struct cifs_sid *)((char *)pnntsd + sidsoffset +
					sizeof(struct cifs_sid));
	cifs_copy_sid(ngroup_sid_ptr, group_sid_ptr);
/*
 * change posix mode to reflect permissions
 * pmode is the existing mode (we only want to overwrite part of this)
 * bits to set can be: S_IRWXU, S_IRWXG or S_IRWXO ie 00700 or 00070 or 00007
 */
static void access_flags_to_mode(__le32 ace_flags, int type, umode_t *pmode,
				 umode_t *pbits_to_set)
	__u32 flags = le32_to_cpu(ace_flags);
	/* the order of ACEs is important. The canonical order is to begin with
	   DENY entries followed by ALLOW, otherwise an allow entry could be
	   encountered first, making the subsequent deny entry like "dead code"
	   which would be superflous since Windows stops when a match is made
	   for the operation you are trying to perform for your user */

	/* For deny ACEs we change the mask so that subsequent allow access
	   control entries do not turn on the bits we are denying */
	if (type == ACCESS_DENIED) {
		if (flags & GENERIC_ALL)
			*pbits_to_set &= ~S_IRWXUGO;

		if ((flags & GENERIC_WRITE) ||
			((flags & FILE_WRITE_RIGHTS) == FILE_WRITE_RIGHTS))
			*pbits_to_set &= ~S_IWUGO;
		if ((flags & GENERIC_READ) ||
			((flags & FILE_READ_RIGHTS) == FILE_READ_RIGHTS))
			*pbits_to_set &= ~S_IRUGO;
		if ((flags & GENERIC_EXECUTE) ||
			((flags & FILE_EXEC_RIGHTS) == FILE_EXEC_RIGHTS))
			*pbits_to_set &= ~S_IXUGO;

	} else if (type != ACCESS_ALLOWED) {
		cERROR(1, "unknown access control type %d", type);

	/* else ACCESS_ALLOWED type */

	if (flags & GENERIC_ALL) {
		*pmode |= (S_IRWXUGO & (*pbits_to_set));
		cFYI(DBG2, "all perms");

	if ((flags & GENERIC_WRITE) ||
		((flags & FILE_WRITE_RIGHTS) == FILE_WRITE_RIGHTS))
		*pmode |= (S_IWUGO & (*pbits_to_set));
	if ((flags & GENERIC_READ) ||
		((flags & FILE_READ_RIGHTS) == FILE_READ_RIGHTS))
		*pmode |= (S_IRUGO & (*pbits_to_set));
	if ((flags & GENERIC_EXECUTE) ||
		((flags & FILE_EXEC_RIGHTS) == FILE_EXEC_RIGHTS))
		*pmode |= (S_IXUGO & (*pbits_to_set));

	cFYI(DBG2, "access flags 0x%x mode now 0x%x", flags, *pmode);
/*
 * Generate access flags to reflect permissions; mode is the existing mode.
 * This function is called for every ACE in the DACL whose SID matches
 * with either owner or group or everyone.
 */
static void mode_to_access_flags(umode_t mode, umode_t bits_to_use,
	/* reset access mask */

	/* bits to use are either S_IRWXU or S_IRWXG or S_IRWXO */

	/* check for R/W/X UGO since we do not know whose flags
	   is this but we have cleared all the bits sans RWX for
	   either user or group or other as per bits_to_use */
		*pace_flags |= SET_FILE_READ_RIGHTS;
		*pace_flags |= SET_FILE_WRITE_RIGHTS;
		*pace_flags |= SET_FILE_EXEC_RIGHTS;

	cFYI(DBG2, "mode: 0x%x, access flags now 0x%x", mode, *pace_flags);
/*
 * Build one ACCESS_ALLOWED ACE for the given SID from the POSIX mode
 * bits selected by `bits` (S_IRWXU/G/O).  Returns the ACE size in
 * bytes so callers can pack ACEs back to back.
 */
static __u16 fill_ace_for_sid(struct cifs_ace *pntace,
			const struct cifs_sid *psid, __u64 nmode, umode_t bits)
	__u32 access_req = 0;

	pntace->type = ACCESS_ALLOWED;
	mode_to_access_flags(nmode, bits, &access_req);
	/* never grant less than the minimum rights */
		access_req = SET_MINIMUM_RIGHTS;
	pntace->access_req = cpu_to_le32(access_req);

	pntace->sid.revision = psid->revision;
	pntace->sid.num_subauth = psid->num_subauth;
	for (i = 0; i < 6; i++)
		pntace->sid.authority[i] = psid->authority[i];
	for (i = 0; i < psid->num_subauth; i++)
		pntace->sid.sub_auth[i] = psid->sub_auth[i];

	/* size = type(1) + flags(1) + size(2) + access_req(4) +
	   sid revision(1) + num_subauth(1) + authority(6) + subauths(4 each) */
	size = 1 + 1 + 2 + 4 + 1 + 1 + 6 + (psid->num_subauth * 4);
	pntace->size = cpu_to_le16(size);
#ifdef CONFIG_CIFS_DEBUG2
/* Sanity-check one ACE against end_of_acl and log its fields
   (debug builds only). */
static void dump_ace(struct cifs_ace *pace, char *end_of_acl)
	/* validate that we do not go past end of acl */

	/* an ACE header plus minimal SID cannot be smaller than 16 bytes */
	if (le16_to_cpu(pace->size) < 16) {
		cERROR(1, "ACE too small %d", le16_to_cpu(pace->size));

	if (end_of_acl < (char *)pace + le16_to_cpu(pace->size)) {
		cERROR(1, "ACL too small to parse ACE");

	num_subauth = pace->sid.num_subauth;

	cFYI(1, "ACE revision %d num_auth %d type %d flags %d size %d",
		 pace->sid.revision, pace->sid.num_subauth, pace->type,
		 pace->flags, le16_to_cpu(pace->size));
	for (i = 0; i < num_subauth; ++i) {
		cFYI(1, "ACE sub_auth[%d]: 0x%x", i,
			 le32_to_cpu(pace->sid.sub_auth[i]));

	/* BB add length check to make sure that we do not have huge
	   num auths and therefore go off the end */
/*
 * Walk the DACL and accumulate POSIX mode bits into fattr->cf_mode.
 * An absent DACL grants rwx to everyone; an empty one (0 ACEs) grants
 * nothing.  Each ACE's SID is matched against the owner SID, group SID,
 * Everyone and Authenticated Users, and its access mask is folded into
 * the corresponding mode bits via access_flags_to_mode().
 */
static void parse_dacl(struct cifs_acl *pdacl, char *end_of_acl,
		       struct cifs_sid *pownersid, struct cifs_sid *pgrpsid,
		       struct cifs_fattr *fattr)
	struct cifs_ace **ppace;

	/* BB need to add parm so we can store the SID BB */

		/* no DACL in the security descriptor, set
		   all the permissions for user/group/other */
		fattr->cf_mode |= S_IRWXUGO;

	/* validate that we do not go past end of acl */
	if (end_of_acl < (char *)pdacl + le16_to_cpu(pdacl->size)) {
		cERROR(1, "ACL too small to parse DACL");

	cFYI(DBG2, "DACL revision %d size %d num aces %d",
		le16_to_cpu(pdacl->revision), le16_to_cpu(pdacl->size),
		le32_to_cpu(pdacl->num_aces));

	/* reset rwx permissions for user/group/other.
	   Also, if num_aces is 0 i.e. DACL has no ACEs,
	   user/group/other have no permissions */
	fattr->cf_mode &= ~(S_IRWXUGO);

	acl_base = (char *)pdacl;
	acl_size = sizeof(struct cifs_acl);

	num_aces = le32_to_cpu(pdacl->num_aces);
		umode_t user_mask = S_IRWXU;
		umode_t group_mask = S_IRWXG;
		/* Everyone/Authenticated Users ACEs affect all three classes */
		umode_t other_mask = S_IRWXU | S_IRWXG | S_IRWXO;

		/* guard the allocation size computation against overflow */
		if (num_aces > ULONG_MAX / sizeof(struct cifs_ace *))
		ppace = kmalloc(num_aces * sizeof(struct cifs_ace *),
			cERROR(1, "DACL memory allocation error");

		for (i = 0; i < num_aces; ++i) {
			ppace[i] = (struct cifs_ace *) (acl_base + acl_size);
#ifdef CONFIG_CIFS_DEBUG2
			dump_ace(ppace[i], end_of_acl);
			if (compare_sids(&(ppace[i]->sid), pownersid) == 0)
				access_flags_to_mode(ppace[i]->access_req,
			if (compare_sids(&(ppace[i]->sid), pgrpsid) == 0)
				access_flags_to_mode(ppace[i]->access_req,
			if (compare_sids(&(ppace[i]->sid), &sid_everyone) == 0)
				access_flags_to_mode(ppace[i]->access_req,
			if (compare_sids(&(ppace[i]->sid), &sid_authusers) == 0)
				access_flags_to_mode(ppace[i]->access_req,

			/* memcpy((void *)(&(cifscred->aces[i])),
			   sizeof(struct cifs_ace)); */

			/* advance by this ACE's self-reported size */
			acl_base = (char *)ppace[i];
			acl_size = le16_to_cpu(ppace[i]->size);
/*
 * Build a three-ACE DACL (owner, group, Everyone) expressing nmode,
 * written immediately after the cifs_acl header.  Sets the DACL's
 * total size and ACE count.
 */
static int set_chmod_dacl(struct cifs_acl *pndacl, struct cifs_sid *pownersid,
			  struct cifs_sid *pgrpsid, __u64 nmode)
	struct cifs_acl *pnndacl;

	/* ACEs start right after the DACL header */
	pnndacl = (struct cifs_acl *)((char *)pndacl + sizeof(struct cifs_acl));

	size += fill_ace_for_sid((struct cifs_ace *) ((char *)pnndacl + size),
				 pownersid, nmode, S_IRWXU);
	size += fill_ace_for_sid((struct cifs_ace *)((char *)pnndacl + size),
				 pgrpsid, nmode, S_IRWXG);
	size += fill_ace_for_sid((struct cifs_ace *)((char *)pnndacl + size),
				 &sid_everyone, nmode, S_IRWXO);

	pndacl->size = cpu_to_le16(size + sizeof(struct cifs_acl));
	pndacl->num_aces = cpu_to_le32(3);
/* Validate a SID read from the wire against end_of_acl; logs its
   fields on debug builds. */
static int parse_sid(struct cifs_sid *psid, char *end_of_acl)
	/* BB need to add parm so we can store the SID BB */

	/* validate that we do not go past end of ACL - sid must be at least 8
	   bytes long (assuming no sub-auths - e.g. the null SID */
	if (end_of_acl < (char *)psid + 8) {
		cERROR(1, "ACL too small to parse SID %p", psid);

	if (psid->num_subauth) {
#ifdef CONFIG_CIFS_DEBUG2
		cFYI(1, "SID revision %d num_auth %d",
			psid->revision, psid->num_subauth);

		for (i = 0; i < psid->num_subauth; i++) {
			cFYI(1, "SID sub_auth[%d]: 0x%x ", i,
				le32_to_cpu(psid->sub_auth[i]));

		/* BB add length check to make sure that we do not have huge
		   num auths and therefore go off the end */
			le32_to_cpu(psid->sub_auth[psid->num_subauth-1]));
1012 /* Convert CIFS ACL to POSIX form */
1013 static int parse_sec_desc(struct cifs_sb_info *cifs_sb,
1014 struct cifs_ntsd *pntsd, int acl_len, struct cifs_fattr *fattr)
1017 struct cifs_sid *owner_sid_ptr, *group_sid_ptr;
1018 struct cifs_acl *dacl_ptr; /* no need for SACL ptr */
1019 char *end_of_acl = ((char *)pntsd) + acl_len;
1025 owner_sid_ptr = (struct cifs_sid *)((char *)pntsd +
1026 le32_to_cpu(pntsd->osidoffset));
1027 group_sid_ptr = (struct cifs_sid *)((char *)pntsd +
1028 le32_to_cpu(pntsd->gsidoffset));
1029 dacloffset = le32_to_cpu(pntsd->dacloffset);
1030 dacl_ptr = (struct cifs_acl *)((char *)pntsd + dacloffset);
1031 cFYI(DBG2, "revision %d type 0x%x ooffset 0x%x goffset 0x%x "
1032 "sacloffset 0x%x dacloffset 0x%x",
1033 pntsd->revision, pntsd->type, le32_to_cpu(pntsd->osidoffset),
1034 le32_to_cpu(pntsd->gsidoffset),
1035 le32_to_cpu(pntsd->sacloffset), dacloffset);
1036 /* cifs_dump_mem("owner_sid: ", owner_sid_ptr, 64); */
1037 rc = parse_sid(owner_sid_ptr, end_of_acl);
1039 cFYI(1, "%s: Error %d parsing Owner SID", __func__, rc);
1042 rc = sid_to_id(cifs_sb, owner_sid_ptr, fattr, SIDOWNER);
1044 cFYI(1, "%s: Error %d mapping Owner SID to uid", __func__, rc);
1048 rc = parse_sid(group_sid_ptr, end_of_acl);
1050 cFYI(1, "%s: Error %d mapping Owner SID to gid", __func__, rc);
1053 rc = sid_to_id(cifs_sb, group_sid_ptr, fattr, SIDGROUP);
1055 cFYI(1, "%s: Error %d mapping Group SID to gid", __func__, rc);
1060 parse_dacl(dacl_ptr, end_of_acl, owner_sid_ptr,
1061 group_sid_ptr, fattr);
1063 cFYI(1, "no ACL"); /* BB grant all or default perms? */
/* Convert permission bits from mode to equivalent CIFS ACL.
   Three mutually exclusive operations, selected by the sentinel values:
   - nmode != NO_CHANGE_64: chmod — build a fresh three-ACE DACL,
   - uid != NO_CHANGE_32:   chown — replace the owner SID in a copy,
   - gid != NO_CHANGE_32:   chgrp — replace the group SID in a copy.
   *aclflag tells the caller which part of the descriptor to send.
   NOTE(review): interior error-path lines are elided in this view. */
static int build_sec_desc(struct cifs_ntsd *pntsd, struct cifs_ntsd *pnntsd,
	__u32 secdesclen, __u64 nmode, uid_t uid, gid_t gid, int *aclflag)
	struct cifs_sid *owner_sid_ptr, *group_sid_ptr;
	struct cifs_sid *nowner_sid_ptr, *ngroup_sid_ptr;
	struct cifs_acl *dacl_ptr = NULL;  /* no need for SACL ptr */
	struct cifs_acl *ndacl_ptr = NULL; /* no need for SACL ptr */

	if (nmode != NO_CHANGE_64) { /* chmod */
		owner_sid_ptr = (struct cifs_sid *)((char *)pntsd +
				le32_to_cpu(pntsd->osidoffset));
		group_sid_ptr = (struct cifs_sid *)((char *)pntsd +
				le32_to_cpu(pntsd->gsidoffset));
		dacloffset = le32_to_cpu(pntsd->dacloffset);
		dacl_ptr = (struct cifs_acl *)((char *)pntsd + dacloffset);
		/* new DACL goes right after the fixed descriptor header */
		ndacloffset = sizeof(struct cifs_ntsd);
		ndacl_ptr = (struct cifs_acl *)((char *)pnntsd + ndacloffset);
		ndacl_ptr->revision = dacl_ptr->revision;
		ndacl_ptr->size = 0;
		ndacl_ptr->num_aces = 0;

		rc = set_chmod_dacl(ndacl_ptr, owner_sid_ptr, group_sid_ptr,

		/* SIDs are packed right after the new DACL */
		sidsoffset = ndacloffset + le16_to_cpu(ndacl_ptr->size);
		/* copy sec desc control portion & owner and group sids */
		copy_sec_desc(pntsd, pnntsd, sidsoffset);
		*aclflag = CIFS_ACL_DACL;

		/* chown/chgrp: start from a byte copy of the original */
		memcpy(pnntsd, pntsd, secdesclen);
		if (uid != NO_CHANGE_32) { /* chown */
			owner_sid_ptr = (struct cifs_sid *)((char *)pnntsd +
					le32_to_cpu(pnntsd->osidoffset));
			nowner_sid_ptr = kmalloc(sizeof(struct cifs_sid),
			if (!nowner_sid_ptr)
			rc = id_to_sid(uid, SIDOWNER, nowner_sid_ptr);
				cFYI(1, "%s: Mapping error %d for owner id %d",
				kfree(nowner_sid_ptr);
			cifs_copy_sid(owner_sid_ptr, nowner_sid_ptr);
			kfree(nowner_sid_ptr);
			*aclflag = CIFS_ACL_OWNER;
		if (gid != NO_CHANGE_32) { /* chgrp */
			group_sid_ptr = (struct cifs_sid *)((char *)pnntsd +
					le32_to_cpu(pnntsd->gsidoffset));
			ngroup_sid_ptr = kmalloc(sizeof(struct cifs_sid),
			if (!ngroup_sid_ptr)
			rc = id_to_sid(gid, SIDGROUP, ngroup_sid_ptr);
				cFYI(1, "%s: Mapping error %d for group id %d",
				kfree(ngroup_sid_ptr);
			cifs_copy_sid(group_sid_ptr, ngroup_sid_ptr);
			kfree(ngroup_sid_ptr);
			*aclflag = CIFS_ACL_GROUP;
/*
 * Fetch the security descriptor for an already-open file handle.
 * Returns a server-allocated cifs_ntsd (length in *pacllen) or an
 * ERR_PTR; caller owns the returned buffer.
 */
static struct cifs_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb,
		__u16 fid, u32 *pacllen)
	struct cifs_ntsd *pntsd = NULL;
	struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);

		return ERR_CAST(tlink);

	rc = CIFSSMBGetCIFSACL(xid, tlink_tcon(tlink), fid, &pntsd, pacllen);

	cifs_put_tlink(tlink);

	cFYI(1, "%s: rc = %d ACL len %d", __func__, rc, *pacllen);
/*
 * Fetch the security descriptor by path: open the file with
 * READ_CONTROL (with backup intent if the mount allows), query the
 * ACL, then close.  Returns the descriptor or an ERR_PTR; caller owns
 * the returned buffer.
 */
static struct cifs_ntsd *get_cifs_acl_by_path(struct cifs_sb_info *cifs_sb,
		const char *path, u32 *pacllen)
	struct cifs_ntsd *pntsd = NULL;
	int rc, create_options = 0;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);

		return ERR_CAST(tlink);

	tcon = tlink_tcon(tlink);

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	rc = CIFSSMBOpen(xid, tcon, path, FILE_OPEN, READ_CONTROL,
			create_options, &fid, &oplock, NULL, cifs_sb->local_nls,
			cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
		rc = CIFSSMBGetCIFSACL(xid, tcon, fid, &pntsd, pacllen);
		CIFSSMBClose(xid, tcon, fid);

	cifs_put_tlink(tlink);

	cFYI(1, "%s: rc = %d ACL len %d", __func__, rc, *pacllen);
/* Retrieve an ACL from the server: reuse an existing readable open
   file handle if one exists, otherwise fall back to an open-by-path. */
struct cifs_ntsd *get_cifs_acl(struct cifs_sb_info *cifs_sb,
		struct inode *inode, const char *path,
	struct cifs_ntsd *pntsd = NULL;
	struct cifsFileInfo *open_file = NULL;

	open_file = find_readable_file(CIFS_I(inode), true);
		return get_cifs_acl_by_path(cifs_sb, path, pacllen);

	pntsd = get_cifs_acl_by_fid(cifs_sb, open_file->fid.netfid, pacllen);
	cifsFileInfo_put(open_file);
/* Set an ACL on the server.  aclflag selects which descriptor parts
   are being written: WRITE_OWNER access is needed for owner/group
   changes, WRITE_DAC for DACL-only changes. */
int set_cifs_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
			struct inode *inode, const char *path, int aclflag)
	int rc, access_flags, create_options = 0;
	struct cifs_tcon *tcon;
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);

		return PTR_ERR(tlink);

	tcon = tlink_tcon(tlink);

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	if (aclflag == CIFS_ACL_OWNER || aclflag == CIFS_ACL_GROUP)
		access_flags = WRITE_OWNER;
		access_flags = WRITE_DAC;

	rc = CIFSSMBOpen(xid, tcon, path, FILE_OPEN, access_flags,
			create_options, &fid, &oplock, NULL, cifs_sb->local_nls,
			cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
		cERROR(1, "Unable to open file to set ACL");

	rc = CIFSSMBSetCIFSACL(xid, tcon, fid, pnntsd, acllen, aclflag);
	cFYI(DBG2, "SetCIFSACL rc = %d", rc);

	CIFSSMBClose(xid, tcon, fid);

	cifs_put_tlink(tlink);
/* Translate the CIFS ACL (simlar to NTFS ACL) for a file into mode bits.
   Prefers the caller-supplied fid when given; otherwise fetches the
   descriptor via get_cifs_acl(), then parses it into fattr. */
cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr,
		  struct inode *inode, const char *path, const __u16 *pfid)
	struct cifs_ntsd *pntsd = NULL;

	cFYI(DBG2, "converting ACL to mode for %s", path);

		pntsd = get_cifs_acl_by_fid(cifs_sb, *pfid, &acllen);
		pntsd = get_cifs_acl(cifs_sb, inode, path, &acllen);

	/* if we can retrieve the ACL, now parse Access Control Entries, ACEs */
	if (IS_ERR(pntsd)) {
		rc = PTR_ERR(pntsd);
		cERROR(1, "%s: error %d getting sec desc", __func__, rc);
		rc = parse_sec_desc(cifs_sb, pntsd, acllen, fattr);
			cERROR(1, "parse sec desc failed rc = %d", rc);
1295 /* Convert mode bits to an ACL so we can update the ACL on the server */
1297 id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64 nmode,
1298 uid_t uid, gid_t gid)
1301 int aclflag = CIFS_ACL_DACL; /* default flag to set */
1302 __u32 secdesclen = 0;
1303 struct cifs_ntsd *pntsd = NULL; /* acl obtained from server */
1304 struct cifs_ntsd *pnntsd = NULL; /* modified acl to be sent to server */
1306 cFYI(DBG2, "set ACL from mode for %s", path);
1308 /* Get the security descriptor */
1309 pntsd = get_cifs_acl(CIFS_SB(inode->i_sb), inode, path, &secdesclen);
1311 /* Add three ACEs for owner, group, everyone getting rid of
1312 other ACEs as chmod disables ACEs and set the security descriptor */
1314 if (IS_ERR(pntsd)) {
1315 rc = PTR_ERR(pntsd);
1316 cERROR(1, "%s: error %d getting sec desc", __func__, rc);
1318 /* allocate memory for the smb header,
1319 set security descriptor request security descriptor
1320 parameters, and secuirty descriptor itself */
1322 secdesclen = secdesclen < DEFSECDESCLEN ?
1323 DEFSECDESCLEN : secdesclen;
1324 pnntsd = kmalloc(secdesclen, GFP_KERNEL);
1326 cERROR(1, "Unable to allocate security descriptor");
1331 rc = build_sec_desc(pntsd, pnntsd, secdesclen, nmode, uid, gid,
1334 cFYI(DBG2, "build_sec_desc rc: %d", rc);
1337 /* Set the security descriptor */
1338 rc = set_cifs_acl(pnntsd, secdesclen, inode,
1340 cFYI(DBG2, "set_cifs_acl rc: %d", rc);