1 // SPDX-License-Identifier: GPL-2.0-only
3 * AppArmor security module
5 * This file contains AppArmor functions for unpacking policy loaded from
8 * Copyright (C) 1998-2008 Novell/SUSE
9 * Copyright 2009-2010 Canonical Ltd.
11 * AppArmor uses a serialized binary format for loading policy. To find
12 * policy format documentation see Documentation/admin-guide/LSM/apparmor.rst
13 * All policy is validated before it is used.
16 #include <asm/unaligned.h>
17 #include <linux/ctype.h>
18 #include <linux/errno.h>
19 #include <linux/zstd.h>
21 #include "include/apparmor.h"
22 #include "include/audit.h"
23 #include "include/cred.h"
24 #include "include/crypto.h"
25 #include "include/file.h"
26 #include "include/match.h"
27 #include "include/path.h"
28 #include "include/policy.h"
29 #include "include/policy_unpack.h"
/* Policy ABI versioning: the low 10 bits of the stream "version" word are
 * the kernel ABI version; FORCE_COMPLAIN_FLAG is an out-of-band bit that
 * forces complain mode on load. The VERSION_* helpers compare only the
 * masked ABI bits. NOTE(review): listing is gappy — v7 (and possibly other
 * version constants) are not visible in this view.
 */
31 #define K_ABI_MASK 0x3ff
32 #define FORCE_COMPLAIN_FLAG 0x800
33 #define VERSION_LT(X, Y) (((X) & K_ABI_MASK) < ((Y) & K_ABI_MASK))
34 #define VERSION_LE(X, Y) (((X) & K_ABI_MASK) <= ((Y) & K_ABI_MASK))
35 #define VERSION_GT(X, Y) (((X) & K_ABI_MASK) > ((Y) & K_ABI_MASK))
37 #define v5 5 /* base version */
38 #define v6 6 /* per entry policydb mediation check */
40 #define v8 8 /* full network masking */
41 #define v9 9 /* xbits are used as permission bits in policydb */
44 * The AppArmor interface treats data as a type byte followed by the
45 * actual data. The interface has the notion of a named entry
46 * which has a name (AA_NAME typecode followed by name string) followed by
47 * the entries typecode and data. Named types allow for optional
48 * elements and extensions to be added and tested for without breaking
49 * backwards compatibility.
57 AA_NAME, /* same as string except it is items name */
69 * aa_ext is the read of the buffer containing the serialized profile. The
70 * data is copied into a kernel buffer in apparmorfs and then handed off to
71 * the unpack routines.
76 void *pos; /* pointer to current position in the buffer */
80 /* audit callback for unpack fields */
/* audit_cb - append unpack-specific fields (ns, name, buffer offset) to an
 * audit record. Strings are logged with audit_log_untrustedstring() because
 * they originate from loaded (untrusted) policy data.
 * NOTE(review): this listing is gappy; braces and some lines are missing
 * from view.
 */
81 static void audit_cb(struct audit_buffer *ab, void *va)
83 struct common_audit_data *sa = va;
85 if (aad(sa)->iface.ns) {
86 audit_log_format(ab, " ns=");
87 audit_log_untrustedstring(ab, aad(sa)->iface.ns);
/* name= is logged unconditionally here — presumably guarded by a check on
 * aad(sa)->name in a line not visible in this view; confirm upstream. */
90 audit_log_format(ab, " name=");
91 audit_log_untrustedstring(ab, aad(sa)->name);
93 if (aad(sa)->iface.pos)
94 audit_log_format(ab, " offset=%ld", aad(sa)->iface.pos);
98 * audit_iface - do audit message for policy unpacking/load/replace/remove
99 * @new: profile if it has been allocated (MAYBE NULL)
100 * @ns_name: name of the ns the profile is to be loaded to (MAY BE NULL)
101 * @name: name of the profile being manipulated (MAYBE NULL)
102 * @info: any extra info about the failure (MAYBE NULL)
103 * @e: buffer position info
106 * Returns: %0 or error
/* audit_iface - emit an AUDIT_APPARMOR_STATUS record for policy
 * load/replace/remove. Records the byte offset into the serialized stream
 * (e->pos - e->start) so failures can be located in the blob. Prefers the
 * new profile's hierarchical name when @new is set, else the raw @name.
 * NOTE(review): gappy listing — the trailing "int error" parameter and the
 * if/else around the name selection are not visible here.
 */
108 static int audit_iface(struct aa_profile *new, const char *ns_name,
109 const char *name, const char *info, struct aa_ext *e,
112 struct aa_profile *profile = labels_profile(aa_current_raw_label());
113 DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, NULL);
115 aad(&sa)->iface.pos = e->pos - e->start;
116 aad(&sa)->iface.ns = ns_name;
118 aad(&sa)->name = new->base.hname;
120 aad(&sa)->name = name;
121 aad(&sa)->info = info;
122 aad(&sa)->error = error;
124 return aa_audit(AUDIT_APPARMOR_STATUS, profile, &sa, audit_cb);
/* __aa_loaddata_update - bump raw-data revision and touch the apparmorfs
 * mtimes of the loaddata dir and revision file so userspace can observe
 * the change. Caller must hold data->ns->lock (asserted); revision must be
 * monotonically non-decreasing (asserted).
 */
127 void __aa_loaddata_update(struct aa_loaddata *data, long revision)
131 AA_BUG(!mutex_is_locked(&data->ns->lock));
132 AA_BUG(data->revision > revision);
134 data->revision = revision;
135 if ((data->dents[AAFS_LOADDATA_REVISION])) {
136 d_inode(data->dents[AAFS_LOADDATA_DIR])->i_mtime =
137 current_time(d_inode(data->dents[AAFS_LOADDATA_DIR]));
138 d_inode(data->dents[AAFS_LOADDATA_REVISION])->i_mtime =
139 current_time(d_inode(data->dents[AAFS_LOADDATA_REVISION]));
/* aa_rawdata_eq - test whether two raw policy blobs are identical.
 * Cheap size checks first, then hash (only when hashing is enabled),
 * finally a full memcmp over the compressed data (or uncompressed size
 * when compressed_size is 0). Returns true on equality.
 * NOTE(review): gappy listing — the early "return false" lines are missing
 * from this view.
 */
143 bool aa_rawdata_eq(struct aa_loaddata *l, struct aa_loaddata *r)
145 if (l->size != r->size)
147 if (l->compressed_size != r->compressed_size)
149 if (aa_g_hash_policy && memcmp(l->hash, r->hash, aa_hash_size()) != 0)
151 return memcmp(l->data, r->data, r->compressed_size ?: r->size) == 0;
155 * need to take the ns mutex lock which is NOT safe most places that
156 * put_loaddata is called, so we have to delay freeing it
/* do_loaddata_free - deferred (workqueue) release of an aa_loaddata.
 * Freeing requires ns->lock, which is unsafe to take at most put sites,
 * hence the work_struct indirection. Sensitive buffers are cleared with
 * kfree_sensitive(). NOTE(review): gappy listing — the NULL-ns guard,
 * aa_put_ns, data free and final kfree are not visible here.
 */
158 static void do_loaddata_free(struct work_struct *work)
160 struct aa_loaddata *d = container_of(work, struct aa_loaddata, work);
161 struct aa_ns *ns = aa_get_ns(d->ns);
/* nested lock annotation keyed on ns->level to satisfy lockdep */
164 mutex_lock_nested(&ns->lock, ns->level);
165 __aa_fs_remove_rawdata(d);
166 mutex_unlock(&ns->lock);
170 kfree_sensitive(d->hash);
171 kfree_sensitive(d->name);
/* aa_loaddata_kref - kref release callback: schedule the actual free on a
 * workqueue (see do_loaddata_free for why it must be deferred).
 */
176 void aa_loaddata_kref(struct kref *kref)
178 struct aa_loaddata *d = container_of(kref, struct aa_loaddata, count);
181 INIT_WORK(&d->work, do_loaddata_free);
182 schedule_work(&d->work);
/* aa_loaddata_alloc - allocate a zeroed aa_loaddata plus a kvzalloc'd data
 * buffer of @size bytes. Returns ERR_PTR(-ENOMEM) on allocation failure.
 * NOTE(review): gappy listing — the NULL checks and the "return d" are not
 * visible; presumably d is freed before the second ERR_PTR return.
 */
186 struct aa_loaddata *aa_loaddata_alloc(size_t size)
188 struct aa_loaddata *d;
190 d = kzalloc(sizeof(*d), GFP_KERNEL);
192 return ERR_PTR(-ENOMEM);
193 d->data = kvzalloc(size, GFP_KERNEL);
196 return ERR_PTR(-ENOMEM);
198 kref_init(&d->count);
199 INIT_LIST_HEAD(&d->list);
204 /* test if read will be in packed data bounds */
/* inbounds - bounds check: true if @size bytes can be read from the
 * current position without running past e->end. Written as a subtraction
 * (e->end - e->pos) so it cannot overflow the way e->pos + size could.
 */
205 static bool inbounds(struct aa_ext *e, size_t size)
207 return (size <= e->end - e->pos);
/* kvmemdup - kvmalloc a buffer and copy @len bytes of @src into it.
 * NOTE(review): gappy listing — the NULL check / memcpy / return lines are
 * not visible in this view.
 */
210 static void *kvmemdup(const void *src, size_t len)
212 void *p = kvmalloc(len, GFP_KERNEL);
220 * unpack_u16_chunk - test and do bounds checking for a u16 size based chunk
221 * @e: serialized data read head (NOT NULL)
222 * @chunk: start address for chunk of data (NOT NULL)
224 * Returns: the size of chunk found with the read head at the end of the chunk.
/* unpack_u16_chunk - read a little-endian u16 length, then validate that
 * many bytes remain; on success the read head ends past the chunk and the
 * chunk size is returned. Returns 0 (with head restored, per the pattern
 * elsewhere in this file) on a bounds failure.
 * NOTE(review): gappy listing — the *chunk assignment, head restore and
 * returns are not visible here.
 */
226 static size_t unpack_u16_chunk(struct aa_ext *e, char **chunk)
231 if (!inbounds(e, sizeof(u16)))
233 size = le16_to_cpu(get_unaligned((__le16 *) e->pos));
234 e->pos += sizeof(__le16);
235 if (!inbounds(e, size))
246 /* unpack control byte */
/* unpack_X - consume one control byte iff it equals @code; the read-head
 * advance on match is in a line not visible in this gappy view.
 */
247 static bool unpack_X(struct aa_ext *e, enum aa_code code)
251 if (*(u8 *) e->pos != code)
258 * unpack_nameX - check is the next element is of type X with a name of @name
259 * @e: serialized data extent information (NOT NULL)
261 * @name: name to match to the serialized element. (MAYBE NULL)
263 * check that the next serialized data element is of type X and has a tag
264 * name @name. If @name is specified then there must be a matching
265 * name element in the stream. If @name is NULL any name element will be
266 * skipped and only the typecode will be tested.
268 * Returns true on success (both type code and name tests match) and the read
269 * head is advanced past the headers
271 * Returns: false if either match fails, the read head does not move
/* unpack_nameX - match an optional AA_NAME tag plus a typecode @code.
 * If @name is given, the tag must be present, NUL-terminated, and strcmp-
 * equal; if @name is NULL a tag (if any) is skipped. On any mismatch the
 * read head is restored to the saved position (restore lines not visible
 * in this gappy view).
 */
273 static bool unpack_nameX(struct aa_ext *e, enum aa_code code, const char *name)
276 * May need to reset pos if name or type doesn't match
280 * Check for presence of a tagname, and if present name size
281 * AA_NAME tag value is a u16.
283 if (unpack_X(e, AA_NAME)) {
285 size_t size = unpack_u16_chunk(e, &tag);
286 /* if a name is specified it must match. otherwise skip tag */
/* size==0 covers the unpack_u16_chunk failure case; tag[size-1] != '\0'
 * rejects unterminated names before strcmp can run off the buffer */
287 if (name && (!size || tag[size-1] != '\0' || strcmp(name, tag)))
290 /* if a name is specified and there is no name tag fail */
294 /* now check if type code matches */
295 if (unpack_X(e, code))
/* unpack_u8 - unpack an optionally named AA_U8 element into *data.
 * Bounds-checked; failure/return paths are in lines not visible in this
 * gappy view (head restore on failure, per file convention).
 */
303 static bool unpack_u8(struct aa_ext *e, u8 *data, const char *name)
307 if (unpack_nameX(e, AA_U8, name)) {
308 if (!inbounds(e, sizeof(u8)))
311 *data = *((u8 *)e->pos);
312 e->pos += sizeof(u8);
/* unpack_u32 - unpack an optionally named little-endian AA_U32 into *data.
 * Uses get_unaligned since the stream has no alignment guarantees.
 */
321 static bool unpack_u32(struct aa_ext *e, u32 *data, const char *name)
325 if (unpack_nameX(e, AA_U32, name)) {
326 if (!inbounds(e, sizeof(u32)))
329 *data = le32_to_cpu(get_unaligned((__le32 *) e->pos));
330 e->pos += sizeof(u32);
/* unpack_u64 - unpack an optionally named little-endian AA_U64 into *data.
 * Same pattern as unpack_u32; unaligned, bounds-checked read.
 */
339 static bool unpack_u64(struct aa_ext *e, u64 *data, const char *name)
343 if (unpack_nameX(e, AA_U64, name)) {
344 if (!inbounds(e, sizeof(u64)))
347 *data = le64_to_cpu(get_unaligned((__le64 *) e->pos));
348 e->pos += sizeof(u64);
/* unpack_array - unpack an AA_ARRAY header and return its u16 element
 * count (0 on failure; return lines not visible in this gappy view).
 * Elements themselves are unpacked by the caller.
 */
357 static size_t unpack_array(struct aa_ext *e, const char *name)
361 if (unpack_nameX(e, AA_ARRAY, name)) {
363 if (!inbounds(e, sizeof(u16)))
365 size = (int)le16_to_cpu(get_unaligned((__le16 *) e->pos));
366 e->pos += sizeof(u16);
/* unpack_blob - unpack an AA_BLOB: u32 length followed by that many bytes.
 * On success *blob points INTO the stream (no copy) and the length is
 * returned; 0 on failure. NOTE(review): the *blob assignment and returns
 * are in lines not visible in this gappy view.
 */
375 static size_t unpack_blob(struct aa_ext *e, char **blob, const char *name)
379 if (unpack_nameX(e, AA_BLOB, name)) {
381 if (!inbounds(e, sizeof(u32)))
383 size = le32_to_cpu(get_unaligned((__le32 *) e->pos));
384 e->pos += sizeof(u32);
385 if (inbounds(e, (size_t) size)) {
/* unpack_str - unpack an AA_STRING element; verifies NUL termination and
 * (per the visible comment) returns size including the terminator, with
 * *string pointing into the stream. Failure paths not visible in this
 * gappy view.
 */
397 static int unpack_str(struct aa_ext *e, const char **string, const char *name)
403 if (unpack_nameX(e, AA_STRING, name)) {
404 size = unpack_u16_chunk(e, &src_str);
406 /* strings are null terminated, length is size - 1 */
407 if (src_str[size - 1] != 0)
/* unpack_strdup - like unpack_str but kmemdup's the string out of the
 * stream so it outlives the load buffer; *string owns the copy.
 * NOTE(review): error handling between these lines is not visible here.
 */
420 static int unpack_strdup(struct aa_ext *e, char **string, const char *name)
424 int res = unpack_str(e, &tmp, name);
430 *string = kmemdup(tmp, res, GFP_KERNEL);
441 * unpack_dfa - unpack a file rule dfa
442 * @e: serialized data extent information (NOT NULL)
444 * returns dfa or ERR_PTR or NULL if no dfa
/* unpack_dfa - unpack an optional "aadfa" blob into an aa_dfa.
 * The dfa payload is 8-byte aligned relative to the start of the stream,
 * so the pad is computed from the blob's offset within the stream and
 * skipped before handing to aa_dfa_unpack. Extra state verification is
 * enabled under paranoid load. Returns dfa, ERR_PTR, or NULL when absent.
 */
446 static struct aa_dfa *unpack_dfa(struct aa_ext *e)
450 struct aa_dfa *dfa = NULL;
452 size = unpack_blob(e, &blob, "aadfa");
455 * The dfa is aligned with in the blob to 8 bytes
456 * from the beginning of the stream.
457 * alignment adjust needed by dfa unpack
459 size_t sz = blob - (char *) e->start -
460 ((e->pos - e->start) & 7);
461 size_t pad = ALIGN(sz, 8) - sz;
462 int flags = TO_ACCEPT1_FLAG(YYTD_DATA32) |
463 TO_ACCEPT2_FLAG(YYTD_DATA32);
464 if (aa_g_paranoid_load)
465 flags |= DFA_FLAG_VERIFY_STATES;
466 dfa = aa_dfa_unpack(blob + pad, size - pad, flags);
477 * unpack_trans_table - unpack a profile transition table
478 * @e: serialized data extent information (NOT NULL)
479 * @profile: profile to add the accept table to (NOT NULL)
481 * Returns: true if table successfully unpacked
/* unpack_trans_table - unpack the optional "xtable" struct of exec
 * transition target strings into profile->file.trans. Each entry is
 * validated: NUL terminated (by unpack_strdup), internal NUL counting for
 * ":"-prefixed label entries (exactly one embedded NUL, converted back to
 * ':' for label_parse). On failure the read head is restored to saved_pos
 * and the partially built table is freed.
 * NOTE(review): gappy listing — size-bound checks, the validation
 * branches, and the success/fail returns are not visible in this view.
 */
483 static bool unpack_trans_table(struct aa_ext *e, struct aa_profile *profile)
485 void *saved_pos = e->pos;
487 /* exec table is optional */
488 if (unpack_nameX(e, AA_STRUCT, "xtable")) {
491 size = unpack_array(e, NULL);
492 /* currently 4 exec bits and entries 0-3 are reserved iupcx */
495 profile->file.trans.table = kcalloc(size, sizeof(char *),
497 if (!profile->file.trans.table)
500 profile->file.trans.size = size;
501 for (i = 0; i < size; i++) {
503 int c, j, pos, size2 = unpack_strdup(e, &str, NULL);
504 /* unpack_strdup verifies that the last character is
505 * null termination byte.
509 profile->file.trans.table[i] = str;
510 /* verify that name doesn't start with space */
514 /* count internal # of internal \0 */
515 for (c = j = 0; j < size2 - 1; j++) {
522 /* first character after : must be valid */
525 /* beginning with : requires an embedded \0,
526 * verify that exactly 1 internal \0 exists
527 * trailing \0 already verified by unpack_strdup
529 * convert \0 back to : for label_parse
536 /* fail - all other cases with embedded \0 */
539 if (!unpack_nameX(e, AA_ARRAYEND, NULL))
541 if (!unpack_nameX(e, AA_STRUCTEND, NULL))
/* failure path: free any strings already dup'd into the table */
547 aa_free_domain_entries(&profile->file.trans);
/* unpack_xattrs - unpack the optional "xattrs" struct: an array of xattr
 * name strings used for profile attachment conditions. Allocates
 * profile->xattrs and dups each entry. Struct is optional; absence is
 * success. Fail/restore paths are in lines not visible in this gappy view.
 */
552 static bool unpack_xattrs(struct aa_ext *e, struct aa_profile *profile)
556 if (unpack_nameX(e, AA_STRUCT, "xattrs")) {
559 size = unpack_array(e, NULL);
560 profile->xattr_count = size;
561 profile->xattrs = kcalloc(size, sizeof(char *), GFP_KERNEL);
562 if (!profile->xattrs)
564 for (i = 0; i < size; i++) {
565 if (!unpack_strdup(e, &profile->xattrs[i], NULL))
568 if (!unpack_nameX(e, AA_ARRAYEND, NULL))
570 if (!unpack_nameX(e, AA_STRUCTEND, NULL))
/* unpack_secmark - unpack the optional "secmark" struct: an array of
 * (audit u8, deny u8, label string) triples into profile->secmark. On
 * failure every dup'd label and the array itself are freed and the count
 * reset, leaving the profile consistent.
 * NOTE(review): gappy listing — the goto fail targets and returns are not
 * visible in this view.
 */
581 static bool unpack_secmark(struct aa_ext *e, struct aa_profile *profile)
586 if (unpack_nameX(e, AA_STRUCT, "secmark")) {
587 size = unpack_array(e, NULL)&#59;
589 profile->secmark = kcalloc(size, sizeof(struct aa_secmark),
591 if (!profile->secmark)
594 profile->secmark_count = size;
596 for (i = 0; i < size; i++) {
597 if (!unpack_u8(e, &profile->secmark[i].audit, NULL))
599 if (!unpack_u8(e, &profile->secmark[i].deny, NULL))
601 if (!unpack_strdup(e, &profile->secmark[i].label, NULL))
604 if (!unpack_nameX(e, AA_ARRAYEND, NULL))
606 if (!unpack_nameX(e, AA_STRUCTEND, NULL))
/* failure cleanup: undo partial allocation so caller can free safely */
613 if (profile->secmark) {
614 for (i = 0; i < size; i++)
615 kfree(profile->secmark[i].label);
616 kfree(profile->secmark);
617 profile->secmark_count = 0;
618 profile->secmark = NULL;
/* unpack_rlimits - unpack the optional "rlimits" struct: a u32 mask plus
 * an array of u64 hard limits. The array index is mapped through
 * aa_map_resource() to the kernel's rlimit numbering; size is validated
 * against RLIM_NLIMITS before use.
 * NOTE(review): gappy listing — fail returns are not visible in this view.
 */
625 static bool unpack_rlimits(struct aa_ext *e, struct aa_profile *profile)
629 /* rlimits are optional */
630 if (unpack_nameX(e, AA_STRUCT, "rlimits")) {
633 if (!unpack_u32(e, &tmp, NULL))
635 profile->rlimits.mask = tmp;
637 size = unpack_array(e, NULL);
638 if (size > RLIM_NLIMITS)
640 for (i = 0; i < size; i++) {
642 int a = aa_map_resource(i);
643 if (!unpack_u64(e, &tmp2, NULL))
645 profile->rlimits.limits[a].rlim_max = tmp2;
647 if (!unpack_nameX(e, AA_ARRAYEND, NULL))
649 if (!unpack_nameX(e, AA_STRUCTEND, NULL))
/* strhash - rhashtable hash fn for the profile data table: the key is a
 * (char *) stored by pointer, so dereference once and jhash the string
 * bytes. @len is the key field width, not the string length, hence the
 * explicit strlen.
 */
659 static u32 strhash(const void *data, u32 len, u32 seed)
661 const char * const *key = data;
663 return jhash(*key, strlen(*key), seed);
/* datacmp - rhashtable compare fn: match an aa_data object against a
 * (char *) lookup key by string content (0 == equal, per rhashtable
 * contract).
 */
666 static int datacmp(struct rhashtable_compare_arg *arg, const void *obj)
668 const struct aa_data *data = obj;
669 const char * const *key = arg->key;
671 return strcmp(data->key, *key);
674 /* remap old accept table embedded permissions to separate permission table */
/* dfa_map_xindex - translate the old 4-bit exec-transition index embedded
 * in dfa accept entries (bits 10-13 of @mask) into the new AA_X_* flag
 * encoding. Indexes >= 4 reference the transition table (index - 4).
 * NOTE(review): gappy listing — the xtype dispatch that selects between
 * these branches, and the unsafe/table-type lines, are not visible here;
 * flag meanings hedged accordingly.
 */
675 static u16 dfa_map_xindex(u16 mask)
677 u16 old_index = (mask >> 10) & 0xf;
681 index |= AA_X_UNSAFE;
683 index |= AA_X_INHERIT;
685 index |= AA_X_UNCONFINED;
687 if (old_index == 1) {
688 index |= AA_X_UNCONFINED;
689 } else if (old_index == 2) {
691 } else if (old_index == 3) {
692 index |= AA_X_NAME | AA_X_CHILD;
693 } else if (old_index) {
695 index |= old_index - 4;
702 * map old dfa inline permissions to new format
/* Bit layout of old-style accept entries: user perms in the low 14 bits
 * (allow 0-6, xbits 7-13), "other" perms shifted up by 14, bit 31 used as
 * a flag preserved by dfa_user_allow/dfa_other_allow. audit/quiet live in
 * ACCEPT_TABLE2 with the same user/other split. */
704 #define dfa_user_allow(dfa, state) (((ACCEPT_TABLE(dfa)[state]) & 0x7f) | \
705 ((ACCEPT_TABLE(dfa)[state]) & 0x80000000))
706 #define dfa_user_xbits(dfa, state) (((ACCEPT_TABLE(dfa)[state]) >> 7) & 0x7f)
707 #define dfa_user_audit(dfa, state) ((ACCEPT_TABLE2(dfa)[state]) & 0x7f)
708 #define dfa_user_quiet(dfa, state) (((ACCEPT_TABLE2(dfa)[state]) >> 7) & 0x7f)
709 #define dfa_user_xindex(dfa, state) \
710 (dfa_map_xindex(ACCEPT_TABLE(dfa)[state] & 0x3fff))
712 #define dfa_other_allow(dfa, state) ((((ACCEPT_TABLE(dfa)[state]) >> 14) & \
714 ((ACCEPT_TABLE(dfa)[state]) & 0x80000000))
715 #define dfa_other_xbits(dfa, state) \
716 ((((ACCEPT_TABLE(dfa)[state]) >> 7) >> 14) & 0x7f)
717 #define dfa_other_audit(dfa, state) (((ACCEPT_TABLE2(dfa)[state]) >> 14) & 0x7f)
718 #define dfa_other_quiet(dfa, state) \
719 ((((ACCEPT_TABLE2(dfa)[state]) >> 7) >> 14) & 0x7f)
720 #define dfa_other_xindex(dfa, state) \
721 dfa_map_xindex((ACCEPT_TABLE(dfa)[state] >> 14) & 0x3fff)
724 * map_old_perms - map old file perms layout to the new layout
725 * @old: permission set in old mapping
727 * Returns: new permission mapping
/* map_old_perms - expand an old compact file-permission bitset into the
 * richer new AA_MAY_* layout (read implies getattr/open; write implies
 * setattr/create/delete/chmod/chown/open; lock and link_subset were
 * overlaid in the old format and disambiguated by pairing).
 * NOTE(review): gappy listing — the per-bit tests (e.g. the read/write
 * masks) preceding these |= lines are not visible in this view.
 */
729 static u32 map_old_perms(u32 old)
734 new |= AA_MAY_GETATTR | AA_MAY_OPEN;
736 new |= AA_MAY_SETATTR | AA_MAY_CREATE | AA_MAY_DELETE |
737 AA_MAY_CHMOD | AA_MAY_CHOWN | AA_MAY_OPEN;
740 /* the old mapping lock and link_subset flags where overlaid
741 * and use was determined by part of a pair that they were in
744 new |= AA_MAY_LOCK | AA_LINK_SUBSET;
745 if (old & 0x40) /* AA_EXEC_MMAP */
/* compute_fperms_allow - add allow bits shared by user/other fperm sets:
 * getattr is always allowed; change_profile (bit 31) and onexec (bit 30)
 * are read directly from the raw accept entry since they were not
 * ownership-conditional in the old mapping.
 */
751 static void compute_fperms_allow(struct aa_perms *perms, struct aa_dfa *dfa,
754 perms->allow |= AA_MAY_GETATTR;
756 /* change_profile wasn't determined by ownership in old mapping */
757 if (ACCEPT_TABLE(dfa)[state] & 0x80000000)
758 perms->allow |= AA_MAY_CHANGE_PROFILE;
759 if (ACCEPT_TABLE(dfa)[state] & 0x40000000)
760 perms->allow |= AA_MAY_ONEXEC;
/* compute_fperms_user - build the user-half aa_perms for one dfa state
 * from the old inline encoding (allow/audit/quiet via map_old_perms, plus
 * the exec index and the shared allow bits).
 */
763 static struct aa_perms compute_fperms_user(struct aa_dfa *dfa,
766 struct aa_perms perms = { };
768 perms.allow = map_old_perms(dfa_user_allow(dfa, state));
769 perms.audit = map_old_perms(dfa_user_audit(dfa, state));
770 perms.quiet = map_old_perms(dfa_user_quiet(dfa, state));
771 perms.xindex = dfa_user_xindex(dfa, state);
773 compute_fperms_allow(&perms, dfa, state);
/* compute_fperms_other - same as compute_fperms_user but for the "other"
 * (non-owner) half of the old accept-entry encoding.
 */
778 static struct aa_perms compute_fperms_other(struct aa_dfa *dfa,
781 struct aa_perms perms = { };
783 perms.allow = map_old_perms(dfa_other_allow(dfa, state));
784 perms.audit = map_old_perms(dfa_other_audit(dfa, state));
785 perms.quiet = map_old_perms(dfa_other_quiet(dfa, state));
786 perms.xindex = dfa_other_xindex(dfa, state);
788 compute_fperms_allow(&perms, dfa, state);
794 * aa_compute_fperms - convert dfa compressed perms to internal perms and store
795 * them so they can be retrieved later.
796 * @dfa: a dfa using fperms to remap to internal permissions
798 * Returns: remapped perm table
/* compute_fperms - build the external file-permission table for a dfa:
 * two aa_perms entries per state (user at state*2, other at state*2+1).
 * State 0 is the trap state and is left zero-initialized by kvcalloc.
 * Returns the table or (presumably, in lines not visible here) NULL on
 * allocation failure.
 */
800 static struct aa_perms *compute_fperms(struct aa_dfa *dfa)
803 unsigned int state_count;
804 struct aa_perms *table;
808 state_count = dfa->tables[YYTD_ID_BASE]->td_lolen;
809 /* DFAs are restricted from having a state_count of less than 2 */
810 table = kvcalloc(state_count * 2, sizeof(struct aa_perms), GFP_KERNEL);
814 /* zero init so skip the trap state (state == 0) */
815 for (state = 1; state < state_count; state++) {
816 table[state * 2] = compute_fperms_user(dfa, state);
817 table[state * 2 + 1] = compute_fperms_other(dfa, state);
/* compute_xmatch_perms - build a one-entry-per-state permission table for
 * the xmatch dfa; only the raw user allow bits are needed (xmatch encodes
 * attachment priority, not file perms). Trap state stays zeroed.
 */
823 static struct aa_perms *compute_xmatch_perms(struct aa_dfa *xmatch)
825 struct aa_perms *perms;
831 state_count = xmatch->tables[YYTD_ID_BASE]->td_lolen;
832 /* DFAs are restricted from having a state_count of less than 2 */
833 perms = kvcalloc(state_count, sizeof(struct aa_perms), GFP_KERNEL);
835 /* zero init so skip the trap state (state == 0) */
836 for (state = 1; state < state_count; state++)
837 perms[state].allow = dfa_user_allow(xmatch, state);
/* map_other - relocate the policydb "other" perm bits into their new
 * positions (setattr/getattr, accept/bind/listen, setopt/getopt), per the
 * inline comments.
 */
842 static u32 map_other(u32 x)
844 return ((x & 0x3) << 8) | /* SETATTR/GETATTR */
845 ((x & 0x1c) << 18) | /* ACCEPT/BIND/LISTEN */
846 ((x & 0x60) << 19); /* SETOPT/GETOPT */
/* map_xbits - relocate policydb xbits into permission-bit positions (v9+
 * streams use xbits as real perms; see compute_perms_entry).
 * NOTE(review): only the first shift term is visible in this gappy view.
 */
849 static u32 map_xbits(u32 x)
851 return ((x & 0x1) << 7) |
/* compute_perms_entry - build one policydb aa_perms entry from a dfa
 * state, version-dependently: the "other" half extends the general perm
 * set (v5+ policydb dropped the user/other conditional), v8-and-earlier
 * get implicit AA_MAY_LOCK, and v9+ additionally map the xbits into
 * allow/quiet (no way to force audit on xbit-represented perms).
 */
855 static struct aa_perms compute_perms_entry(struct aa_dfa *dfa,
859 struct aa_perms perms = { };
861 perms.allow = dfa_user_allow(dfa, state);
862 perms.audit = dfa_user_audit(dfa, state);
863 perms.quiet = dfa_user_quiet(dfa, state);
866 * This mapping is convulated due to history.
867 * v1-v4: only file perms, which are handled by compute_fperms
868 * v5: added policydb which dropped user conditional to gain new
869 * perm bits, but had to map around the xbits because the
870 * userspace compiler was still munging them.
871 * v9: adds using the xbits in policydb because the compiler now
872 * supports treating policydb permission bits different.
873 * Unfortunately there is no way to force auditing on the
874 * perms represented by the xbits
876 perms.allow |= map_other(dfa_other_allow(dfa, state));
877 if (VERSION_LE(version, v8))
878 perms.allow |= AA_MAY_LOCK;
/* v9+: xbits are real permission bits, fold them in */
880 perms.allow |= map_xbits(dfa_user_xbits(dfa, state));
883 * for v5-v9 perm mapping in the policydb, the other set is used
884 * to extend the general perm set
886 perms.audit |= map_other(dfa_other_audit(dfa, state));
887 perms.quiet |= map_other(dfa_other_quiet(dfa, state));
888 if (VERSION_GT(version, v8))
889 perms.quiet |= map_xbits(dfa_other_xbits(dfa, state));
/* compute_perms - build the policydb permission table: one aa_perms per
 * dfa state, mapped version-dependently by compute_perms_entry. Trap
 * state (0) stays zeroed by kvcalloc.
 */
894 static struct aa_perms *compute_perms(struct aa_dfa *dfa, u32 version)
897 unsigned int state_count;
898 struct aa_perms *table;
902 state_count = dfa->tables[YYTD_ID_BASE]->td_lolen;
903 /* DFAs are restricted from having a state_count of less than 2 */
904 table = kvcalloc(state_count, sizeof(struct aa_perms), GFP_KERNEL);
908 /* zero init so skip the trap state (state == 0) */
909 for (state = 1; state < state_count; state++)
910 table[state] = compute_perms_entry(dfa, state, version);
916 * remap_dfa_accept - remap old dfa accept table to be an index
917 * @dfa: dfa to do the remapping on
918 * @factor: scaling factor for the index conversion.
920 * Used in conjunction with compute_Xperms, it converts old style perms
921 * that are encoded in the dfa accept tables to the new style where
922 * there is a permission table and the accept table is an index into
923 * the permission table.
/* remap_dfa_accept - after the perms have been extracted into a separate
 * table, rewrite each accept entry to state * @factor so it becomes an
 * index into that table (factor 2 for file dfas: user/other pairs), and
 * drop the now-unused ACCEPT2 table.
 */
925 static void remap_dfa_accept(struct aa_dfa *dfa, unsigned int factor)
928 unsigned int state_count = dfa->tables[YYTD_ID_BASE]->td_lolen;
932 for (state = 0; state < state_count; state++)
933 ACCEPT_TABLE(dfa)[state] = state * factor;
934 kvfree(dfa->tables[YYTD_ID_ACCEPT2]);
935 dfa->tables[YYTD_ID_ACCEPT2] = NULL;
939 * unpack_profile - unpack a serialized profile
940 * @e: serialized data extent information (NOT NULL)
941 * @ns_name: pointer of newly allocated copy of %NULL in case of error
943 * NOTE: unpack profile sets audit struct if there is a failure
/* unpack_profile - unpack one serialized profile from the stream into a
 * newly allocated aa_profile. Sections, in stream order: name (with
 * optional ns prefix split out into *ns_name), optional rename/attach
 * strings, optional xmatch dfa + len + perm table, flags struct
 * (hat/debug/mode/audit), optional path_flags, capability words (base,
 * optional caps64/capsx structs), xattrs, rlimits, secmark, optional
 * policydb struct, file dfa + transition table, and an optional key/value
 * "data" rhashtable. On any failure audits via audit_iface, frees the
 * partial profile, and returns ERR_PTR.
 * NOTE(review): this listing is gappy — goto fail_profile paths, several
 * guards, and closing braces are not visible; comments below describe
 * only what the visible lines establish.
 */
945 static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
947 struct aa_profile *profile = NULL;
948 const char *tmpname, *tmpns = NULL, *name = NULL;
949 const char *info = "failed to unpack profile";
951 struct rhashtable_params params = { 0 };
953 struct aa_data *data;
954 int i, error = -EPROTO;
960 /* check that we have the right struct being passed */
961 if (!unpack_nameX(e, AA_STRUCT, "profile"))
963 if (!unpack_str(e, &name, NULL))
/* split "ns//profile" fqname; dup the ns portion for the caller */
968 tmpname = aa_splitn_fqname(name, strlen(name), &tmpns, &ns_len);
970 *ns_name = kstrndup(tmpns, ns_len, GFP_KERNEL);
972 info = "out of memory";
978 profile = aa_alloc_profile(name, NULL, GFP_KERNEL);
980 return ERR_PTR(-ENOMEM);
982 /* profile renaming is optional */
983 (void) unpack_str(e, &profile->rename, "rename");
985 /* attachment string is optional */
986 (void) unpack_str(e, &profile->attach, "attach");
988 /* xmatch is optional and may be NULL */
989 profile->xmatch.dfa = unpack_dfa(e);
990 if (IS_ERR(profile->xmatch.dfa)) {
991 error = PTR_ERR(profile->xmatch.dfa);
992 profile->xmatch.dfa = NULL;
996 /* neither xmatch_len not xmatch_perms are optional if xmatch is set */
997 if (profile->xmatch.dfa) {
998 if (!unpack_u32(e, &tmp, NULL)) {
999 info = "missing xmatch len";
1002 profile->xmatch_len = tmp;
1003 profile->xmatch.start[AA_CLASS_XMATCH] = DFA_START;
1004 profile->xmatch.perms = compute_xmatch_perms(profile->xmatch.dfa);
1005 if (!profile->xmatch.perms) {
1006 info = "failed to convert xmatch permission table";
/* factor 1: one perm entry per state */
1009 remap_dfa_accept(profile->xmatch.dfa, 1);
1012 /* disconnected attachment string is optional */
1013 (void) unpack_str(e, &profile->disconnected, "disconnected");
1015 /* per profile debug flags (complain, audit) */
1016 if (!unpack_nameX(e, AA_STRUCT, "flags")) {
1017 info = "profile missing flags";
1020 info = "failed to unpack profile flags";
1021 if (!unpack_u32(e, &tmp, NULL))
1023 if (tmp & PACKED_FLAG_HAT)
1024 profile->label.flags |= FLAG_HAT;
1025 if (tmp & PACKED_FLAG_DEBUG1)
1026 profile->label.flags |= FLAG_DEBUG1;
1027 if (tmp & PACKED_FLAG_DEBUG2)
1028 profile->label.flags |= FLAG_DEBUG2;
1029 if (!unpack_u32(e, &tmp, NULL))
/* FORCE_COMPLAIN_FLAG in the stream version overrides packed mode */
1031 if (tmp == PACKED_MODE_COMPLAIN || (e->version & FORCE_COMPLAIN_FLAG)) {
1032 profile->mode = APPARMOR_COMPLAIN;
1033 } else if (tmp == PACKED_MODE_ENFORCE) {
1034 profile->mode = APPARMOR_ENFORCE;
1035 } else if (tmp == PACKED_MODE_KILL) {
1036 profile->mode = APPARMOR_KILL;
1037 } else if (tmp == PACKED_MODE_UNCONFINED) {
1038 profile->mode = APPARMOR_UNCONFINED;
1039 profile->label.flags |= FLAG_UNCONFINED;
1043 if (!unpack_u32(e, &tmp, NULL))
1046 profile->audit = AUDIT_ALL;
1048 if (!unpack_nameX(e, AA_STRUCTEND, NULL))
1051 /* path_flags is optional */
1052 if (unpack_u32(e, &profile->path_flags, "path_flags"))
1053 profile->path_flags |= profile->label.flags &
1054 PATH_MEDIATE_DELETED;
1056 /* set a default value if path_flags field is not present */
1057 profile->path_flags = PATH_MEDIATE_DELETED;
/* base 32-bit capability words; tmpcap word is parsed but its use is not
 * visible in this view */
1059 info = "failed to unpack profile capabilities";
1060 if (!unpack_u32(e, &(profile->caps.allow.cap[0]), NULL))
1062 if (!unpack_u32(e, &(profile->caps.audit.cap[0]), NULL))
1064 if (!unpack_u32(e, &(profile->caps.quiet.cap[0]), NULL))
1066 if (!unpack_u32(e, &tmpcap.cap[0], NULL))
1069 info = "failed to unpack upper profile capabilities";
1070 if (unpack_nameX(e, AA_STRUCT, "caps64")) {
1071 /* optional upper half of 64 bit caps */
1072 if (!unpack_u32(e, &(profile->caps.allow.cap[1]), NULL))
1074 if (!unpack_u32(e, &(profile->caps.audit.cap[1]), NULL))
1076 if (!unpack_u32(e, &(profile->caps.quiet.cap[1]), NULL))
1078 if (!unpack_u32(e, &(tmpcap.cap[1]), NULL))
1080 if (!unpack_nameX(e, AA_STRUCTEND, NULL))
1084 info = "failed to unpack extended profile capabilities";
1085 if (unpack_nameX(e, AA_STRUCT, "capsx")) {
1086 /* optional extended caps mediation mask */
1087 if (!unpack_u32(e, &(profile->caps.extended.cap[0]), NULL))
1089 if (!unpack_u32(e, &(profile->caps.extended.cap[1]), NULL))
1091 if (!unpack_nameX(e, AA_STRUCTEND, NULL))
1095 if (!unpack_xattrs(e, profile)) {
1096 info = "failed to unpack profile xattrs";
1100 if (!unpack_rlimits(e, profile)) {
1101 info = "failed to unpack profile rlimits";
1105 if (!unpack_secmark(e, profile)) {
1106 info = "failed to unpack profile secmark rules";
1110 if (unpack_nameX(e, AA_STRUCT, "policydb")) {
1111 /* generic policy dfa - optional and may be NULL */
1112 info = "failed to unpack policydb";
1113 profile->policy.dfa = unpack_dfa(e);
1114 if (IS_ERR(profile->policy.dfa)) {
1115 error = PTR_ERR(profile->policy.dfa);
1116 profile->policy.dfa = NULL;
1118 } else if (!profile->policy.dfa) {
1122 if (!unpack_u32(e, &profile->policy.start[0], "start"))
1123 /* default start state */
1124 profile->policy.start[0] = DFA_START;
1125 /* setup class index */
1126 for (i = AA_CLASS_FILE; i <= AA_CLASS_LAST; i++) {
1127 profile->policy.start[i] =
1128 aa_dfa_next(profile->policy.dfa,
1129 profile->policy.start[0],
1132 if (!unpack_nameX(e, AA_STRUCTEND, NULL))
1134 profile->policy.perms = compute_perms(profile->policy.dfa,
1136 if (!profile->policy.perms) {
1137 info = "failed to remap policydb permission table";
1140 /* Do not remap internal dfas */
1141 remap_dfa_accept(profile->policy.dfa, 1);
/* no policydb struct: fall back to the shared null dfa */
1143 profile->policy.dfa = aa_get_dfa(nulldfa);
1145 /* get file rules */
1146 profile->file.dfa = unpack_dfa(e);
1147 if (IS_ERR(profile->file.dfa)) {
1148 error = PTR_ERR(profile->file.dfa);
1149 profile->file.dfa = NULL;
1150 info = "failed to unpack profile file rules";
1152 } else if (profile->file.dfa) {
1153 if (!unpack_u32(e, &profile->file.start[AA_CLASS_FILE],
1155 /* default start state */
1156 profile->file.start[AA_CLASS_FILE] = DFA_START;
1157 profile->file.perms = compute_fperms(profile->file.dfa);
1158 if (!profile->file.perms) {
1159 info = "failed to remap file permission table";
/* factor 2: user/other perm pair per state */
1162 remap_dfa_accept(profile->file.dfa, 2);
1163 if (!unpack_trans_table(e, profile)) {
1164 info = "failed to unpack profile transition table";
/* no file dfa: reuse policydb's file class if it mediates file, else the
 * null dfa */
1167 } else if (profile->policy.dfa &&
1168 profile->policy.start[AA_CLASS_FILE]) {
1169 profile->file.dfa = aa_get_dfa(profile->policy.dfa);
1170 profile->file.start[AA_CLASS_FILE] = profile->policy.start[AA_CLASS_FILE];
1172 profile->file.dfa = aa_get_dfa(nulldfa);
1174 if (unpack_nameX(e, AA_STRUCT, "data")) {
1175 info = "out of memory";
1176 profile->data = kzalloc(sizeof(*profile->data), GFP_KERNEL);
/* rhashtable keyed by string pointer; see strhash/datacmp above */
1180 params.nelem_hint = 3;
1181 params.key_len = sizeof(void *);
1182 params.key_offset = offsetof(struct aa_data, key);
1183 params.head_offset = offsetof(struct aa_data, head);
1184 params.hashfn = strhash;
1185 params.obj_cmpfn = datacmp;
1187 if (rhashtable_init(profile->data, &params)) {
1188 info = "failed to init key, value hash table";
1192 while (unpack_strdup(e, &key, NULL)) {
1193 data = kzalloc(sizeof(*data), GFP_KERNEL);
1195 kfree_sensitive(key);
/* blob points into the stream; dup it so it outlives the load buffer */
1200 data->size = unpack_blob(e, &data->data, NULL);
1201 data->data = kvmemdup(data->data, data->size);
1202 if (data->size && !data->data) {
1203 kfree_sensitive(data->key);
1204 kfree_sensitive(data);
1208 rhashtable_insert_fast(profile->data, &data->head,
1212 if (!unpack_nameX(e, AA_STRUCTEND, NULL)) {
1213 info = "failed to unpack end of key, value data table";
1218 if (!unpack_nameX(e, AA_STRUCTEND, NULL)) {
1219 info = "failed to unpack end of profile";
/* common failure exit: audit and free the partial profile */
1230 audit_iface(profile, NULL, name, info, e, error);
1231 aa_free_profile(profile);
1233 return ERR_PTR(error);
1237 * verify_header - unpack serialized stream header
1238 * @e: serialized data read head (NOT NULL)
1239 * @required: whether the header is required or optional
1240 * @ns: Returns - namespace if one is specified else NULL (NOT NULL)
1242 * Returns: error or 0 if header is good
/* verify_header - validate the stream header: a "version" u32 whose ABI
 * bits must be in [v5, v9], plus an optional "namespace" string that must
 * be non-empty and must not conflict with an already-established *ns.
 * Audits and returns -EPROTONOSUPPORT-class errors on failure; dups the
 * ns name into *ns on first sight.
 * NOTE(review): gappy listing — the @required handling and error returns
 * are not visible in this view.
 */
1244 static int verify_header(struct aa_ext *e, int required, const char **ns)
1246 int error = -EPROTONOSUPPORT;
1247 const char *name = NULL;
1250 /* get the interface version */
1251 if (!unpack_u32(e, &e->version, "version")) {
1253 audit_iface(NULL, NULL, NULL, "invalid profile format",
1259 /* Check that the interface version is currently supported.
1260 * if not specified use previous version
1261 * Mask off everything that is not kernel abi version
1263 if (VERSION_LT(e->version, v5) || VERSION_GT(e->version, v9)) {
1264 audit_iface(NULL, NULL, NULL, "unsupported interface version",
1269 /* read the namespace if present */
1270 if (unpack_str(e, &name, "namespace")) {
1271 if (*name == '\0') {
1272 audit_iface(NULL, NULL, NULL, "invalid namespace name",
1276 if (*ns && strcmp(*ns, name)) {
1277 audit_iface(NULL, NULL, NULL, "invalid ns change", e,
1280 *ns = kstrdup(name, GFP_KERNEL);
/* verify_xindex - check an exec-transition index: if it is a table
 * reference (AA_X_TABLE) its index must be within @table_size.
 * NOTE(review): return lines are not visible in this gappy view.
 */
1289 static bool verify_xindex(int xindex, int table_size)
1292 xtype = xindex & AA_X_TYPE_MASK;
1293 index = xindex & AA_X_INDEX_MASK;
1294 if (xtype == AA_X_TABLE && index >= table_size)
1299 /* verify dfa xindexes are in range of transition tables */
/* verify_dfa_xindex - validate every accept-table entry's xindex against
 * the transition table size; false if any is out of range.
 */
1300 static bool verify_dfa_xindex(struct aa_dfa *dfa, int table_size)
1303 for (i = 0; i < dfa->tables[YYTD_ID_ACCEPT]->td_lolen; i++) {
1304 if (!verify_xindex(ACCEPT_TABLE(dfa)[i], table_size))
1311 * verify_profile - Do post unpack analysis to verify profile consistency
1312 * @profile: profile to verify (NOT NULL)
1314 * Returns: 0 if passes verification else error
1316 * This verification is post any unpack mapping or changes
/* verify_profile - post-unpack consistency check: both the file and
 * policydb dfas (when present) must have xindexes within their transition
 * tables; audits and fails with -EPROTO otherwise.
 * NOTE(review): the success return is in a line not visible in this view.
 */
1318 static int verify_profile(struct aa_profile *profile)
1320 if ((profile->file.dfa &&
1321 !verify_dfa_xindex(profile->file.dfa,
1322 profile->file.trans.size)) ||
1323 (profile->policy.dfa &&
1324 !verify_dfa_xindex(profile->policy.dfa,
1325 profile->policy.trans.size))) {
1326 audit_iface(profile, NULL, NULL,
1327 "Unpack: Invalid named transition", NULL, -EPROTO);
/* aa_load_ent_free - drop the profile references held by a load entry,
 * free its ns name, and free the entry itself (sensitively, since it sat
 * in the load path).
 */
1334 void aa_load_ent_free(struct aa_load_ent *ent)
1337 aa_put_profile(ent->rename);
1338 aa_put_profile(ent->old);
1339 aa_put_profile(ent->new);
1340 kfree(ent->ns_name);
1341 kfree_sensitive(ent);
/* aa_load_ent_alloc - allocate a zeroed load entry with an initialized
 * list head. NOTE(review): the NULL guard and return are not visible in
 * this gappy view.
 */
1345 struct aa_load_ent *aa_load_ent_alloc(void)
1347 struct aa_load_ent *ent = kzalloc(sizeof(*ent), GFP_KERNEL);
1349 INIT_LIST_HEAD(&ent->list);
/* compress_zstd - zstd-compress @src (slen bytes) into *dst / *dlen.
 * Only built when EXPORT_BINARY is configured. Allocates a worst-case
 * staging buffer (zstd_compress_bound), a cctx workspace, compresses,
 * then right-sizes the result: a vmalloc'd staging buffer is copied into
 * a fresh allocation, a kmalloc'd one is shrunk in place via krealloc.
 * NOTE(review): gappy listing — error labels, *dlen assignment, frees and
 * the non-EXPORT_BINARY #else branch are not visible in this view.
 */
1353 static int compress_zstd(const char *src, size_t slen, char **dst, size_t *dlen)
1355 #ifdef CONFIG_SECURITY_APPARMOR_EXPORT_BINARY
1356 const zstd_parameters params =
1357 zstd_get_params(aa_g_rawdata_compression_level, slen);
1358 const size_t wksp_len = zstd_cctx_workspace_bound(&params.cParams);
1360 zstd_cctx *ctx = NULL;
1361 size_t out_len = zstd_compress_bound(slen);
1365 out = kvzalloc(out_len, GFP_KERNEL);
1371 wksp = kvzalloc(wksp_len, GFP_KERNEL);
1377 ctx = zstd_init_cctx(wksp, wksp_len);
1383 out_len = zstd_compress_cctx(ctx, out, out_len, src, slen, &params);
1384 if (zstd_is_error(out_len)) {
1389 if (is_vmalloc_addr(out)) {
1390 *dst = kvzalloc(out_len, GFP_KERNEL);
1392 memcpy(*dst, out, out_len);
1398 * If the staging buffer was kmalloc'd, then using krealloc is
1399 * probably going to be faster. The destination buffer will
1400 * always be smaller, so it's just shrunk, avoiding a memcpy
1402 *dst = krealloc(out, out_len, GFP_KERNEL);
/* compress_loaddata - compress data->data in place (replacing the buffer
 * and setting compressed_size) when a nonzero compression level is
 * configured; at level 0 just record compressed_size = size. Must not be
 * called twice (asserted). The old uncompressed buffer is freed when a
 * new one was produced (free call not visible in this gappy view).
 */
1426 static int compress_loaddata(struct aa_loaddata *data)
1428 AA_BUG(data->compressed_size > 0);
1431 * Shortcut the no compression case, else we increase the amount of
1432 * storage required by a small amount
1434 if (aa_g_rawdata_compression_level != 0) {
1435 void *udata = data->data;
1436 int error = compress_zstd(udata, data->size, &data->data,
1437 &data->compressed_size);
1441 if (udata != data->data)
1444 data->compressed_size = data->size;
1450 * aa_unpack - unpack packed binary profile(s) data loaded from user space
1451 * @udata: user data copied to kmem (NOT NULL)
1452 * @lh: list to place unpacked profiles in a aa_repl_ws
1453 * @ns: Returns namespace profile is in if specified else NULL (NOT NULL)
1455 * Unpack user data and return refcounted allocated profile(s) stored in
1456 * @lh in order of discovery, with the list chain stored in base.list
1459 * Returns: profile(s) on @lh else error pointer if fails to unpack
/* aa_unpack - top-level driver: walk the raw blob in @udata, verifying a
 * header per profile (required only for the first), unpacking and
 * verifying each profile, optionally hashing it, and queueing an
 * aa_load_ent per profile on @lh. Afterwards records the stream ABI,
 * optionally hashes the whole blob, and compresses it when binary export
 * is enabled. On error, frees all queued entries.
 * NOTE(review): gappy listing — the aa_ext initializer's .pos, several
 * error gotos, and the final return are not visible in this view.
 */
1461 int aa_unpack(struct aa_loaddata *udata, struct list_head *lh,
1464 struct aa_load_ent *tmp, *ent;
1465 struct aa_profile *profile = NULL;
1468 .start = udata->data,
1469 .end = udata->data + udata->size,
1474 while (e.pos < e.end) {
1475 char *ns_name = NULL;
/* header required only at stream start */
1477 error = verify_header(&e, e.pos == e.start, ns);
1482 profile = unpack_profile(&e, &ns_name);
1483 if (IS_ERR(profile)) {
1484 error = PTR_ERR(profile);
1488 error = verify_profile(profile);
1492 if (aa_g_hash_policy)
1493 error = aa_calc_profile_hash(profile, e.version, start,
1498 ent = aa_load_ent_alloc();
1505 ent->ns_name = ns_name;
1506 list_add_tail(&ent->list, lh);
1508 udata->abi = e.version & K_ABI_MASK;
1509 if (aa_g_hash_policy) {
1510 udata->hash = aa_calc_hash(udata->data, udata->size);
1511 if (IS_ERR(udata->hash)) {
1512 error = PTR_ERR(udata->hash);
1518 if (aa_g_export_binary) {
1519 error = compress_loaddata(udata);
/* error unwind: drop in-flight profile and all queued entries */
1526 aa_put_profile(profile);
1529 list_for_each_entry_safe(ent, tmp, lh, list) {
1530 list_del_init(&ent->list);
1531 aa_load_ent_free(ent);
1537 #ifdef CONFIG_SECURITY_APPARMOR_KUNIT_TEST
1538 #include "policy_unpack_test.c"
1539 #endif /* CONFIG_SECURITY_APPARMOR_KUNIT_TEST */