1 // SPDX-License-Identifier: GPL-2.0-only
3 * AppArmor security module
5 * This file contains AppArmor functions for unpacking policy loaded from
8 * Copyright (C) 1998-2008 Novell/SUSE
9 * Copyright 2009-2010 Canonical Ltd.
11 * AppArmor uses a serialized binary format for loading policy. To find
12 * policy format documentation see Documentation/admin-guide/LSM/apparmor.rst
13 * All policy is validated before it is used.
16 #include <asm/unaligned.h>
17 #include <linux/ctype.h>
18 #include <linux/errno.h>
19 #include <linux/zstd.h>
21 #include "include/apparmor.h"
22 #include "include/audit.h"
23 #include "include/cred.h"
24 #include "include/crypto.h"
25 #include "include/file.h"
26 #include "include/match.h"
27 #include "include/path.h"
28 #include "include/policy.h"
29 #include "include/policy_unpack.h"
/* Only the low 10 bits of the packed "version" word carry the kernel ABI
 * version; higher bits are flags (e.g. FORCE_COMPLAIN_FLAG below).
 */
31 #define K_ABI_MASK 0x3ff
32 #define FORCE_COMPLAIN_FLAG 0x800
/* Compare just the ABI bits of two version words, ignoring flag bits. */
33 #define VERSION_LT(X, Y) (((X) & K_ABI_MASK) < ((Y) & K_ABI_MASK))
34 #define VERSION_LE(X, Y) (((X) & K_ABI_MASK) <= ((Y) & K_ABI_MASK))
35 #define VERSION_GT(X, Y) (((X) & K_ABI_MASK) > ((Y) & K_ABI_MASK))
/* Known policy ABI versions; see compute_perms_entry() for how the
 * permission mapping changed across them.
 */
37 #define v5 5 /* base version */
38 #define v6 6 /* per entry policydb mediation check */
40 #define v8 8 /* full network masking */
41 #define v9 9 /* xbits are used as permission bits in policydb */
44 * The AppArmor interface treats data as a type byte followed by the
45 * actual data. The interface has the notion of a named entry
46 * which has a name (AA_NAME typecode followed by name string) followed by
47 * the entries typecode and data. Named types allow for optional
48 * elements and extensions to be added and tested for without breaking
49 * backwards compatibility.
/* NOTE(review): non-contiguous excerpt — the other enum aa_code values
 * (AA_STRUCT, AA_ARRAY, AA_STRING, AA_U8/U32/U64, AA_BLOB, ...the ones
 * referenced by the unpack_* helpers below) are elided here.
 */
57 AA_NAME, /* same as string except it is items name */
69 * aa_ext is the read of the buffer containing the serialized profile. The
70 * data is copied into a kernel buffer in apparmorfs and then handed off to
71 * the unpack routines.
/* NOTE(review): other struct aa_ext members (the unpack code also reads
 * e->start, e->end and e->version) are elided from this excerpt.
 */
76 void *pos; /* pointer to current position in the buffer */
80 /* audit callback for unpack fields */
/* Emits the unpack-specific audit fields: the target ns (if any), the
 * profile name, and the byte offset into the raw policy blob.
 */
81 static void audit_cb(struct audit_buffer *ab, void *va)
83 struct common_audit_data *sa = va;
85 if (aad(sa)->iface.ns) {
86 audit_log_format(ab, " ns=");
87 audit_log_untrustedstring(ab, aad(sa)->iface.ns);
90 audit_log_format(ab, " name=");
91 audit_log_untrustedstring(ab, aad(sa)->name);
/* offset of the failure within the raw policy blob (set by audit_iface) */
93 if (aad(sa)->iface.pos)
94 audit_log_format(ab, " offset=%ld", aad(sa)->iface.pos);
98 * audit_iface - do audit message for policy unpacking/load/replace/remove
99 * @new: profile if it has been allocated (MAYBE NULL)
100 * @ns_name: name of the ns the profile is to be loaded to (MAY BE NULL)
101 * @name: name of the profile being manipulated (MAYBE NULL)
102 * @info: any extra info about the failure (MAYBE NULL)
103 * @e: buffer position info
106 * Returns: %0 or error
108 static int audit_iface(struct aa_profile *new, const char *ns_name,
109 const char *name, const char *info, struct aa_ext *e,
112 struct aa_profile *profile = labels_profile(aa_current_raw_label());
113 DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, NULL);
/* record where in the raw blob the failure occurred, for audit_cb */
115 aad(&sa)->iface.pos = e->pos - e->start;
116 aad(&sa)->iface.ns = ns_name;
/* presumably taken when @new is non-NULL (guard elided in this excerpt):
 * prefer the allocated profile's fully-qualified hname over @name
 */
118 aad(&sa)->name = new->base.hname;
120 aad(&sa)->name = name;
121 aad(&sa)->info = info;
122 aad(&sa)->error = error;
124 return aa_audit(AUDIT_APPARMOR_STATUS, profile, &sa, audit_cb);
/* Bump the revision of loaded policy data and refresh the mtime of its
 * apparmorfs dentries so userspace can observe the change.
 * Caller must hold data->ns->lock (asserted below); revision must not
 * move backwards.
 */
127 void __aa_loaddata_update(struct aa_loaddata *data, long revision)
131 AA_BUG(!mutex_is_locked(&data->ns->lock));
132 AA_BUG(data->revision > revision);
134 data->revision = revision;
/* only update mtimes if the rawdata dentries have been created */
135 if ((data->dents[AAFS_LOADDATA_REVISION])) {
136 d_inode(data->dents[AAFS_LOADDATA_DIR])->i_mtime =
137 current_time(d_inode(data->dents[AAFS_LOADDATA_DIR]));
138 d_inode(data->dents[AAFS_LOADDATA_REVISION])->i_mtime =
139 current_time(d_inode(data->dents[AAFS_LOADDATA_REVISION]));
/* Compare two raw policy blobs for equality: sizes first (cheap), then
 * hashes when policy hashing is enabled, finally the data bytes
 * (compressed length if the blob is compressed, else the raw size).
 */
143 bool aa_rawdata_eq(struct aa_loaddata *l, struct aa_loaddata *r)
145 if (l->size != r->size)
147 if (l->compressed_size != r->compressed_size)
149 if (aa_g_hash_policy && memcmp(l->hash, r->hash, aa_hash_size()) != 0)
151 return memcmp(l->data, r->data, r->compressed_size ?: r->size) == 0;
155 * need to take the ns mutex lock which is NOT safe most places that
156 * put_loaddata is called, so we have to delay freeing it
/* Workqueue handler that actually frees an aa_loaddata: removes its
 * apparmorfs entries under the ns lock, then releases its buffers.
 */
158 static void do_loaddata_free(struct work_struct *work)
160 struct aa_loaddata *d = container_of(work, struct aa_loaddata, work);
161 struct aa_ns *ns = aa_get_ns(d->ns);
164 mutex_lock_nested(&ns->lock, ns->level);
165 __aa_fs_remove_rawdata(d);
166 mutex_unlock(&ns->lock);
/* kfree_sensitive zeroizes before freeing — policy data may be secret */
170 kfree_sensitive(d->hash);
171 kfree_sensitive(d->name);
/* kref release function: freeing needs ns->lock, which is not safe to
 * take in most put contexts, so defer to a workqueue (see
 * do_loaddata_free() above).
 */
176 void aa_loaddata_kref(struct kref *kref)
178 struct aa_loaddata *d = container_of(kref, struct aa_loaddata, count);
181 INIT_WORK(&d->work, do_loaddata_free);
182 schedule_work(&d->work);
/* Allocate a zeroed aa_loaddata with a @size byte data buffer.
 * Returns the new loaddata (refcount initialized to 1) or
 * ERR_PTR(-ENOMEM) on allocation failure.
 */
186 struct aa_loaddata *aa_loaddata_alloc(size_t size)
188 struct aa_loaddata *d;
190 d = kzalloc(sizeof(*d), GFP_KERNEL);
192 return ERR_PTR(-ENOMEM);
/* kvzalloc: policy blobs can be large, allow vmalloc fallback */
193 d->data = kvzalloc(size, GFP_KERNEL);
196 return ERR_PTR(-ENOMEM);
198 kref_init(&d->count);
199 INIT_LIST_HEAD(&d->list);
204 /* test if read will be in packed data bounds */
205 static bool inbounds(struct aa_ext *e, size_t size)
207 return (size <= e->end - e->pos);
/* kvmalloc-based memdup helper; presumably copies @len bytes of @src and
 * returns NULL on allocation failure (copy/return lines elided in this
 * excerpt) — caller frees with kvfree
 */
210 static void *kvmemdup(const void *src, size_t len)
212 void *p = kvmalloc(len, GFP_KERNEL);
220 * unpack_u16_chunk - test and do bounds checking for a u16 size based chunk
221 * @e: serialized data read head (NOT NULL)
222 * @chunk: start address for chunk of data (NOT NULL)
224 * Returns: the size of chunk found with the read head at the end of the chunk.
226 static size_t unpack_u16_chunk(struct aa_ext *e, char **chunk)
/* both the u16 length prefix and the chunk body are bounds checked */
231 if (!inbounds(e, sizeof(u16)))
/* length prefix is little-endian and may be unaligned in the blob */
233 size = le16_to_cpu(get_unaligned((__le16 *) e->pos));
234 e->pos += sizeof(__le16);
235 if (!inbounds(e, size))
246 /* unpack control byte */
/* Tests that the next byte in the stream is typecode @code; on a match
 * the read head is presumably advanced past it (advance elided in this
 * excerpt).
 */
247 static bool unpack_X(struct aa_ext *e, enum aa_code code)
251 if (*(u8 *) e->pos != code)
258 * unpack_nameX - check is the next element is of type X with a name of @name
259 * @e: serialized data extent information (NOT NULL)
261 * @name: name to match to the serialized element. (MAYBE NULL)
263 * check that the next serialized data element is of type X and has a tag
264 * name @name. If @name is specified then there must be a matching
265 * name element in the stream. If @name is NULL any name element will be
266 * skipped and only the typecode will be tested.
268 * Returns true on success (both type code and name tests match) and the read
269 * head is advanced past the headers
271 * Returns: false if either match fails, the read head does not move
273 static bool unpack_nameX(struct aa_ext *e, enum aa_code code, const char *name)
276 * May need to reset pos if name or type doesn't match
280 * Check for presence of a tagname, and if present name size
281 * AA_NAME tag value is a u16.
283 if (unpack_X(e, AA_NAME)) {
285 size_t size = unpack_u16_chunk(e, &tag);
286 /* if a name is specified it must match. otherwise skip tag */
/* tag must be non-empty and NUL terminated before strcmp is safe */
287 if (name && (!size || tag[size-1] != '\0' || strcmp(name, tag)))
290 /* if a name is specified and there is no name tag fail */
294 /* now check if type code matches */
295 if (unpack_X(e, code))
/* unpack_u8/u32/u64 - unpack an optionally named fixed-width integer.
 * Each checks the typecode (and @name tag), bounds checks the payload,
 * converts from little-endian where applicable, stores into *data and
 * advances the read head. Returns false on mismatch/short data (pos
 * reset is elided in this excerpt).
 */
303 static bool unpack_u8(struct aa_ext *e, u8 *data, const char *name)
307 if (unpack_nameX(e, AA_U8, name)) {
308 if (!inbounds(e, sizeof(u8)))
311 *data = *((u8 *)e->pos);
312 e->pos += sizeof(u8);
321 static bool unpack_u32(struct aa_ext *e, u32 *data, const char *name)
325 if (unpack_nameX(e, AA_U32, name)) {
326 if (!inbounds(e, sizeof(u32)))
/* values are little-endian and may be unaligned within the blob */
329 *data = le32_to_cpu(get_unaligned((__le32 *) e->pos));
330 e->pos += sizeof(u32);
339 static bool unpack_u64(struct aa_ext *e, u64 *data, const char *name)
343 if (unpack_nameX(e, AA_U64, name)) {
344 if (!inbounds(e, sizeof(u64)))
347 *data = le64_to_cpu(get_unaligned((__le64 *) e->pos));
348 e->pos += sizeof(u64);
/* Unpack an optionally named array header; the u16 element count is
 * returned and the read head left at the first element.
 */
357 static size_t unpack_array(struct aa_ext *e, const char *name)
361 if (unpack_nameX(e, AA_ARRAY, name)) {
363 if (!inbounds(e, sizeof(u16)))
365 size = (int)le16_to_cpu(get_unaligned((__le16 *) e->pos));
366 e->pos += sizeof(u16);
/* Unpack an optionally named binary blob: a u32 length followed by that
 * many bytes. *blob is pointed INTO the stream buffer (no copy);
 * presumably returns the blob size, 0 on failure (elided here).
 */
375 static size_t unpack_blob(struct aa_ext *e, char **blob, const char *name)
379 if (unpack_nameX(e, AA_BLOB, name)) {
381 if (!inbounds(e, sizeof(u32)))
383 size = le32_to_cpu(get_unaligned((__le32 *) e->pos));
384 e->pos += sizeof(u32);
385 if (inbounds(e, (size_t) size)) {
/* Unpack an optionally named NUL-terminated string. *string points INTO
 * the stream buffer (no copy); presumably returns the size including the
 * terminator, 0 on failure (elided here).
 */
397 static int unpack_str(struct aa_ext *e, const char **string, const char *name)
403 if (unpack_nameX(e, AA_STRING, name)) {
404 size = unpack_u16_chunk(e, &src_str);
406 /* strings are null terminated, length is size - 1 */
407 if (src_str[size - 1] != 0)
/* Like unpack_str() but kmemdup's the string into a caller-owned buffer,
 * so *string stays valid after the load blob is freed.
 */
420 static int unpack_strdup(struct aa_ext *e, char **string, const char *name)
424 int res = unpack_str(e, &tmp, name);
430 *string = kmemdup(tmp, res, GFP_KERNEL);
441 * unpack_dfa - unpack a file rule dfa
442 * @e: serialized data extent information (NOT NULL)
444 * returns dfa or ERR_PTR or NULL if no dfa
446 static struct aa_dfa *unpack_dfa(struct aa_ext *e)
450 struct aa_dfa *dfa = NULL;
452 size = unpack_blob(e, &blob, "aadfa");
455 * The dfa is aligned with in the blob to 8 bytes
456 * from the beginning of the stream.
457 * alignment adjust needed by dfa unpack
/* compute padding to restore 8-byte alignment of the dfa within @blob */
459 size_t sz = blob - (char *) e->start -
460 ((e->pos - e->start) & 7);
461 size_t pad = ALIGN(sz, 8) - sz;
462 int flags = TO_ACCEPT1_FLAG(YYTD_DATA32) |
463 TO_ACCEPT2_FLAG(YYTD_DATA32);
/* paranoid load: full state machine verification of untrusted input */
464 if (aa_g_paranoid_load)
465 flags |= DFA_FLAG_VERIFY_STATES;
466 dfa = aa_dfa_unpack(blob + pad, size - pad, flags);
477 * unpack_trans_table - unpack a profile transition table
478 * @e: serialized data extent information (NOT NULL)
479 * @profile: profile to add the accept table to (NOT NULL)
481 * Returns: true if table successfully unpacked
483 static bool unpack_trans_table(struct aa_ext *e, struct aa_profile *profile)
/* saved so the read head can be restored if the optional table is absent */
485 void *saved_pos = e->pos;
487 /* exec table is optional */
488 if (unpack_nameX(e, AA_STRUCT, "xtable")) {
491 size = unpack_array(e, NULL);
492 /* currently 4 exec bits and entries 0-3 are reserved iupcx */
495 profile->file.trans.table = kcalloc(size, sizeof(char *),
497 if (!profile->file.trans.table)
500 profile->file.trans.size = size;
/* validate each transition target string as it is unpacked */
501 for (i = 0; i < size; i++) {
503 int c, j, pos, size2 = unpack_strdup(e, &str, NULL);
504 /* unpack_strdup verifies that the last character is
505 * null termination byte.
/* store first so the error path below can free via trans.table */
509 profile->file.trans.table[i] = str;
510 /* verify that name doesn't start with space */
514 /* count internal # of internal \0 */
515 for (c = j = 0; j < size2 - 1; j++) {
522 /* first character after : must be valid */
525 /* beginning with : requires an embedded \0,
526 * verify that exactly 1 internal \0 exists
527 * trailing \0 already verified by unpack_strdup
529 * convert \0 back to : for label_parse
536 /* fail - all other cases with embedded \0 */
539 if (!unpack_nameX(e, AA_ARRAYEND, NULL))
541 if (!unpack_nameX(e, AA_STRUCTEND, NULL))
/* error path: drop any partially built table */
547 aa_free_domain_entries(&profile->file.trans);
/* Unpack the optional "xattrs" struct: an array of xattr name strings
 * used for attachment matching. Returns true on success or when the
 * struct is absent.
 */
552 static bool unpack_xattrs(struct aa_ext *e, struct aa_profile *profile)
556 if (unpack_nameX(e, AA_STRUCT, "xattrs")) {
559 size = unpack_array(e, NULL);
560 profile->xattr_count = size;
561 profile->xattrs = kcalloc(size, sizeof(char *), GFP_KERNEL);
562 if (!profile->xattrs)
564 for (i = 0; i < size; i++) {
565 if (!unpack_strdup(e, &profile->xattrs[i], NULL))
568 if (!unpack_nameX(e, AA_ARRAYEND, NULL))
570 if (!unpack_nameX(e, AA_STRUCTEND, NULL))
/* Unpack the optional "secmark" struct: an array of (audit, deny, label)
 * secmark rules. On failure all partially unpacked entries are freed and
 * the profile's secmark state reset.
 */
581 static bool unpack_secmark(struct aa_ext *e, struct aa_profile *profile)
586 if (unpack_nameX(e, AA_STRUCT, "secmark")) {
587 size = unpack_array(e, NULL);
589 profile->secmark = kcalloc(size, sizeof(struct aa_secmark),
591 if (!profile->secmark)
594 profile->secmark_count = size;
596 for (i = 0; i < size; i++) {
597 if (!unpack_u8(e, &profile->secmark[i].audit, NULL))
599 if (!unpack_u8(e, &profile->secmark[i].deny, NULL))
601 if (!unpack_strdup(e, &profile->secmark[i].label, NULL))
604 if (!unpack_nameX(e, AA_ARRAYEND, NULL))
606 if (!unpack_nameX(e, AA_STRUCTEND, NULL))
/* error path: release labels and the array, leave profile consistent */
613 if (profile->secmark) {
614 for (i = 0; i < size; i++)
615 kfree(profile->secmark[i].label);
616 kfree(profile->secmark);
617 profile->secmark_count = 0;
618 profile->secmark = NULL;
/* Unpack the optional "rlimits" struct: a mask of which rlimits the
 * profile sets plus an array of rlim_max values, remapped through
 * aa_map_resource() to kernel resource numbers.
 */
625 static bool unpack_rlimits(struct aa_ext *e, struct aa_profile *profile)
629 /* rlimits are optional */
630 if (unpack_nameX(e, AA_STRUCT, "rlimits")) {
633 if (!unpack_u32(e, &tmp, NULL))
635 profile->rlimits.mask = tmp;
637 size = unpack_array(e, NULL)
638 if (size > RLIM_NLIMITS)
640 for (i = 0; i < size; i++) {
/* policy resource numbering may differ from the kernel's; remap */
642 int a = aa_map_resource(i);
643 if (!unpack_u64(e, &tmp2, NULL))
645 profile->rlimits.limits[a].rlim_max = tmp2;
647 if (!unpack_nameX(e, AA_ARRAYEND, NULL))
649 if (!unpack_nameX(e, AA_STRUCTEND, NULL))
/* rhashtable callbacks for profile->data: keys are NUL-terminated
 * strings stored by pointer, so hash and compare go through the
 * dereferenced string rather than the pointer bytes.
 */
659 static u32 strhash(const void *data, u32 len, u32 seed)
661 const char * const *key = data;
663 return jhash(*key, strlen(*key), seed);
666 static int datacmp(struct rhashtable_compare_arg *arg, const void *obj)
668 const struct aa_data *data = obj;
669 const char * const *key = arg->key;
671 return strcmp(data->key, *key);
675 * map_old_perms - map old file perms layout to the new layout
676 * @old: permission set in old mapping
678 * Returns: new permission mapping
680 static u32 map_old_perms(u32 old)
/* read access implies getattr/open in the new layout */
685 new |= AA_MAY_GETATTR | AA_MAY_OPEN;
/* write access expands to the full set of modify permissions */
687 new |= AA_MAY_SETATTR | AA_MAY_CREATE | AA_MAY_DELETE |
688 AA_MAY_CHMOD | AA_MAY_CHOWN | AA_MAY_OPEN;
691 /* the old mapping lock and link_subset flags where overlaid
692 * and use was determined by part of a pair that they were in
695 new |= AA_MAY_LOCK | AA_LINK_SUBSET;
696 if (old & 0x40) /* AA_EXEC_MMAP */
/* Add the allow bits shared by the user and other halves of an old-style
 * file dfa accept entry: getattr always, plus change_profile/onexec
 * encoded in the top bits of the accept word.
 */
702 static void compute_fperms_allow(struct aa_perms *perms, struct aa_dfa *dfa,
705 perms->allow |= AA_MAY_GETATTR;
707 /* change_profile wasn't determined by ownership in old mapping */
708 if (ACCEPT_TABLE(dfa)[state] & 0x80000000)
709 perms->allow |= AA_MAY_CHANGE_PROFILE;
710 if (ACCEPT_TABLE(dfa)[state] & 0x40000000)
711 perms->allow |= AA_MAY_ONEXEC;
/* Build the internal perm set for the "user" (owner-matching) half of an
 * old-style file dfa accept entry.
 */
714 static struct aa_perms compute_fperms_user(struct aa_dfa *dfa,
717 struct aa_perms perms = { };
719 perms.allow = map_old_perms(dfa_user_allow(dfa, state));
720 perms.audit = map_old_perms(dfa_user_audit(dfa, state));
721 perms.quiet = map_old_perms(dfa_user_quiet(dfa, state));
722 perms.xindex = dfa_user_xindex(dfa, state);
724 compute_fperms_allow(&perms, dfa, state);
/* Same as compute_fperms_user() but for the "other" (non-owner) half. */
729 static struct aa_perms compute_fperms_other(struct aa_dfa *dfa,
732 struct aa_perms perms = { };
734 perms.allow = map_old_perms(dfa_other_allow(dfa, state));
735 perms.audit = map_old_perms(dfa_other_audit(dfa, state));
736 perms.quiet = map_old_perms(dfa_other_quiet(dfa, state));
737 perms.xindex = dfa_other_xindex(dfa, state);
739 compute_fperms_allow(&perms, dfa, state);
745 * aa_compute_fperms - convert dfa compressed perms to internal perms and store
746 * them so they can be retrieved later.
747 * @dfa: a dfa using fperms to remap to internal permissions
749 * Returns: remapped perm table
751 static struct aa_perms *compute_fperms(struct aa_dfa *dfa)
754 unsigned int state_count;
755 struct aa_perms *table;
759 state_count = dfa->tables[YYTD_ID_BASE]->td_lolen;
760 /* DFAs are restricted from having a state_count of less than 2 */
/* two entries per state: [2*s] = user perms, [2*s + 1] = other perms */
761 table = kvcalloc(state_count * 2, sizeof(struct aa_perms), GFP_KERNEL);
765 /* zero init so skip the trap state (state == 0) */
766 for (state = 1; state < state_count; state++) {
767 table[state * 2] = compute_fperms_user(dfa, state);
768 table[state * 2 + 1] = compute_fperms_other(dfa, state);
/* Build a perm table for an xmatch dfa: only the user allow bits are
 * meaningful (they carry the attachment match priority).
 */
774 static struct aa_perms *compute_xmatch_perms(struct aa_dfa *xmatch)
776 struct aa_perms *perms;
782 state_count = xmatch->tables[YYTD_ID_BASE]->td_lolen;
783 /* DFAs are restricted from having a state_count of less than 2 */
784 perms = kvcalloc(state_count, sizeof(struct aa_perms), GFP_KERNEL);
786 /* zero init so skip the trap state (state == 0) */
787 for (state = 1; state < state_count; state++)
788 perms[state].allow = dfa_user_allow(xmatch, state);
/* Relocate the policydb "other" permission bits into their positions in
 * the internal permission word.
 */
793 static u32 map_other(u32 x)
795 return ((x & 0x3) << 8) | /* SETATTR/GETATTR */
796 ((x & 0x1c) << 18) | /* ACCEPT/BIND/LISTEN */
797 ((x & 0x60) << 19); /* SETOPT/GETOPT */
/* Relocate v9+ xbits into internal permission bit positions. */
800 static u32 map_xbits(u32 x)
802 return ((x & 0x1) << 7) |
/* Build the internal perm set for one policydb dfa state, honoring the
 * version-dependent bit layout described below.
 */
806 static struct aa_perms compute_perms_entry(struct aa_dfa *dfa,
810 struct aa_perms perms = { };
812 perms.allow = dfa_user_allow(dfa, state);
813 perms.audit = dfa_user_audit(dfa, state);
814 perms.quiet = dfa_user_quiet(dfa, state);
817 * This mapping is convulated due to history.
818 * v1-v4: only file perms, which are handled by compute_fperms
819 * v5: added policydb which dropped user conditional to gain new
820 * perm bits, but had to map around the xbits because the
821 * userspace compiler was still munging them.
822 * v9: adds using the xbits in policydb because the compiler now
823 * supports treating policydb permission bits different.
824 * Unfortunately there is no way to force auditing on the
825 * perms represented by the xbits
827 perms.allow |= map_other(dfa_other_allow(dfa, state));
/* pre-v9 policy implicitly allowed lock; v9+ encodes it explicitly */
828 if (VERSION_LE(version, v8))
829 perms.allow |= AA_MAY_LOCK;
831 perms.allow |= map_xbits(dfa_user_xbits(dfa, state));
834 * for v5-v9 perm mapping in the policydb, the other set is used
835 * to extend the general perm set
837 perms.audit |= map_other(dfa_other_audit(dfa, state));
838 perms.quiet |= map_other(dfa_other_quiet(dfa, state));
839 if (VERSION_GT(version, v8))
840 perms.quiet |= map_xbits(dfa_other_xbits(dfa, state));
/* Build the full per-state perm table for a policydb dfa; one entry per
 * state, trap state (0) left zeroed.
 */
845 static struct aa_perms *compute_perms(struct aa_dfa *dfa, u32 version)
848 unsigned int state_count;
849 struct aa_perms *table;
853 state_count = dfa->tables[YYTD_ID_BASE]->td_lolen;
854 /* DFAs are restricted from having a state_count of less than 2 */
855 table = kvcalloc(state_count, sizeof(struct aa_perms), GFP_KERNEL);
859 /* zero init so skip the trap state (state == 0) */
860 for (state = 1; state < state_count; state++)
861 table[state] = compute_perms_entry(dfa, state, version);
867 * remap_dfa_accept - remap old dfa accept table to be an index
868 * @dfa: dfa to do the remapping on
869 * @factor: scaling factor for the index conversion.
871 * Used in conjunction with compute_Xperms, it converts old style perms
872 * that are encoded in the dfa accept tables to the new style where
873 * there is a permission table and the accept table is an index into
874 * the permission table.
876 static void remap_dfa_accept(struct aa_dfa *dfa, unsigned int factor)
879 unsigned int state_count = dfa->tables[YYTD_ID_BASE]->td_lolen;
/* accept entry becomes a direct index: state * entries-per-state */
883 for (state = 0; state < state_count; state++)
884 ACCEPT_TABLE(dfa)[state] = state * factor;
/* the second accept table held the extra perm bits; no longer needed */
885 kvfree(dfa->tables[YYTD_ID_ACCEPT2]);
886 dfa->tables[YYTD_ID_ACCEPT2] = NULL;
890 * unpack_profile - unpack a serialized profile
891 * @e: serialized data extent information (NOT NULL)
892 * @ns_name: pointer of newly allocated copy of %NULL in case of error
894 * NOTE: unpack profile sets audit struct if there is a failure
896 static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
898 struct aa_profile *profile = NULL;
899 const char *tmpname, *tmpns = NULL, *name = NULL;
900 const char *info = "failed to unpack profile";
902 struct rhashtable_params params = { 0 };
904 struct aa_data *data;
905 int i, error = -EPROTO;
911 /* check that we have the right struct being passed */
912 if (!unpack_nameX(e, AA_STRUCT, "profile"))
914 if (!unpack_str(e, &name, NULL))
/* split "ns//profile" style fully-qualified names; ns part is optional */
919 tmpname = aa_splitn_fqname(name, strlen(name), &tmpns, &ns_len);
921 *ns_name = kstrndup(tmpns, ns_len, GFP_KERNEL);
923 info = "out of memory";
929 profile = aa_alloc_profile(name, NULL, GFP_KERNEL);
931 return ERR_PTR(-ENOMEM);
933 /* profile renaming is optional */
934 (void) unpack_str(e, &profile->rename, "rename");
936 /* attachment string is optional */
937 (void) unpack_str(e, &profile->attach, "attach");
939 /* xmatch is optional and may be NULL */
940 profile->xmatch.dfa = unpack_dfa(e);
941 if (IS_ERR(profile->xmatch.dfa)) {
942 error = PTR_ERR(profile->xmatch.dfa);
943 profile->xmatch.dfa = NULL;
947 /* neither xmatch_len not xmatch_perms are optional if xmatch is set */
948 if (profile->xmatch.dfa) {
949 if (!unpack_u32(e, &tmp, NULL)) {
950 info = "missing xmatch len";
953 profile->xmatch_len = tmp;
954 profile->xmatch.start[AA_CLASS_XMATCH] = DFA_START;
/* convert old accept-encoded perms to a table + index (factor 1) */
955 profile->xmatch.perms = compute_xmatch_perms(profile->xmatch.dfa);
956 if (!profile->xmatch.perms) {
957 info = "failed to convert xmatch permission table";
960 remap_dfa_accept(profile->xmatch.dfa, 1);
963 /* disconnected attachment string is optional */
964 (void) unpack_str(e, &profile->disconnected, "disconnected");
966 /* per profile debug flags (complain, audit) */
967 if (!unpack_nameX(e, AA_STRUCT, "flags")) {
968 info = "profile missing flags";
971 info = "failed to unpack profile flags";
972 if (!unpack_u32(e, &tmp, NULL))
974 if (tmp & PACKED_FLAG_HAT)
975 profile->label.flags |= FLAG_HAT;
976 if (tmp & PACKED_FLAG_DEBUG1)
977 profile->label.flags |= FLAG_DEBUG1;
978 if (tmp & PACKED_FLAG_DEBUG2)
979 profile->label.flags |= FLAG_DEBUG2;
980 if (!unpack_u32(e, &tmp, NULL))
/* loading into a "force complain" ns overrides the packed mode */
982 if (tmp == PACKED_MODE_COMPLAIN || (e->version & FORCE_COMPLAIN_FLAG)) {
983 profile->mode = APPARMOR_COMPLAIN;
984 } else if (tmp == PACKED_MODE_ENFORCE) {
985 profile->mode = APPARMOR_ENFORCE;
986 } else if (tmp == PACKED_MODE_KILL) {
987 profile->mode = APPARMOR_KILL;
988 } else if (tmp == PACKED_MODE_UNCONFINED) {
989 profile->mode = APPARMOR_UNCONFINED;
990 profile->label.flags |= FLAG_UNCONFINED;
994 if (!unpack_u32(e, &tmp, NULL))
997 profile->audit = AUDIT_ALL;
999 if (!unpack_nameX(e, AA_STRUCTEND, NULL))
1002 /* path_flags is optional */
1003 if (unpack_u32(e, &profile->path_flags, "path_flags"))
1004 profile->path_flags |= profile->label.flags &
1005 PATH_MEDIATE_DELETED;
1007 /* set a default value if path_flags field is not present */
1008 profile->path_flags = PATH_MEDIATE_DELETED;
1010 info = "failed to unpack profile capabilities";
1011 if (!unpack_u32(e, &(profile->caps.allow.cap[0]), NULL))
1013 if (!unpack_u32(e, &(profile->caps.audit.cap[0]), NULL))
1015 if (!unpack_u32(e, &(profile->caps.quiet.cap[0]), NULL))
1017 if (!unpack_u32(e, &tmpcap.cap[0], NULL))
1020 info = "failed to unpack upper profile capabilities";
1021 if (unpack_nameX(e, AA_STRUCT, "caps64")) {
1022 /* optional upper half of 64 bit caps */
1023 if (!unpack_u32(e, &(profile->caps.allow.cap[1]), NULL))
1025 if (!unpack_u32(e, &(profile->caps.audit.cap[1]), NULL))
1027 if (!unpack_u32(e, &(profile->caps.quiet.cap[1]), NULL))
1029 if (!unpack_u32(e, &(tmpcap.cap[1]), NULL))
1031 if (!unpack_nameX(e, AA_STRUCTEND, NULL))
1035 info = "failed to unpack extended profile capabilities";
1036 if (unpack_nameX(e, AA_STRUCT, "capsx")) {
1037 /* optional extended caps mediation mask */
1038 if (!unpack_u32(e, &(profile->caps.extended.cap[0]), NULL))
1040 if (!unpack_u32(e, &(profile->caps.extended.cap[1]), NULL))
1042 if (!unpack_nameX(e, AA_STRUCTEND, NULL))
1046 if (!unpack_xattrs(e, profile)) {
1047 info = "failed to unpack profile xattrs";
1051 if (!unpack_rlimits(e, profile)) {
1052 info = "failed to unpack profile rlimits";
1056 if (!unpack_secmark(e, profile)) {
1057 info = "failed to unpack profile secmark rules";
1061 if (unpack_nameX(e, AA_STRUCT, "policydb")) {
1062 /* generic policy dfa - optional and may be NULL */
1063 info = "failed to unpack policydb";
1064 profile->policy.dfa = unpack_dfa(e);
1065 if (IS_ERR(profile->policy.dfa)) {
1066 error = PTR_ERR(profile->policy.dfa);
1067 profile->policy.dfa = NULL;
1069 } else if (!profile->policy.dfa) {
1073 if (!unpack_u32(e, &profile->policy.start[0], "start"))
1074 /* default start state */
1075 profile->policy.start[0] = DFA_START;
1076 /* setup class index */
1077 for (i = AA_CLASS_FILE; i <= AA_CLASS_LAST; i++) {
1078 profile->policy.start[i] =
1079 aa_dfa_next(profile->policy.dfa,
1080 profile->policy.start[0],
1083 if (!unpack_nameX(e, AA_STRUCTEND, NULL))
1085 profile->policy.perms = compute_perms(profile->policy.dfa,
1087 if (!profile->policy.perms) {
1088 info = "failed to remap policydb permission table";
1091 /* Do not remap internal dfas */
1092 remap_dfa_accept(profile->policy.dfa, 1);
1094 profile->policy.dfa = aa_get_dfa(nulldfa);
1096 /* get file rules */
1097 profile->file.dfa = unpack_dfa(e);
1098 if (IS_ERR(profile->file.dfa)) {
1099 error = PTR_ERR(profile->file.dfa);
1100 profile->file.dfa = NULL;
1101 info = "failed to unpack profile file rules";
1103 } else if (profile->file.dfa) {
1104 if (!unpack_u32(e, &profile->file.start[AA_CLASS_FILE],
1106 /* default start state */
1107 profile->file.start[AA_CLASS_FILE] = DFA_START;
/* file dfa carries user+other halves, hence index factor 2 */
1108 profile->file.perms = compute_fperms(profile->file.dfa);
1109 if (!profile->file.perms) {
1110 info = "failed to remap file permission table";
1113 remap_dfa_accept(profile->file.dfa, 2);
1114 if (!unpack_trans_table(e, profile)) {
1115 info = "failed to unpack profile transition table";
1118 } else if (profile->policy.dfa &&
1119 profile->policy.start[AA_CLASS_FILE]) {
/* no dedicated file dfa: share the policydb dfa's file class */
1120 profile->file.dfa = aa_get_dfa(profile->policy.dfa);
1121 profile->file.start[AA_CLASS_FILE] = profile->policy.start[AA_CLASS_FILE];
1123 profile->file.dfa = aa_get_dfa(nulldfa);
1125 if (unpack_nameX(e, AA_STRUCT, "data")) {
1126 info = "out of memory";
1127 profile->data = kzalloc(sizeof(*profile->data), GFP_KERNEL);
1131 params.nelem_hint = 3;
1132 params.key_len = sizeof(void *);
1133 params.key_offset = offsetof(struct aa_data, key);
1134 params.head_offset = offsetof(struct aa_data, head);
1135 params.hashfn = strhash;
1136 params.obj_cmpfn = datacmp;
1138 if (rhashtable_init(profile->data, &params)) {
1139 info = "failed to init key, value hash table";
1143 while (unpack_strdup(e, &key, NULL)) {
1144 data = kzalloc(sizeof(*data), GFP_KERNEL);
1146 kfree_sensitive(key);
/* blob points into the load buffer; copy so it outlives the load */
1151 data->size = unpack_blob(e, &data->data, NULL);
1152 data->data = kvmemdup(data->data, data->size);
1153 if (data->size && !data->data) {
1154 kfree_sensitive(data->key);
1155 kfree_sensitive(data);
1159 rhashtable_insert_fast(profile->data, &data->head,
1163 if (!unpack_nameX(e, AA_STRUCTEND, NULL)) {
1164 info = "failed to unpack end of key, value data table";
1169 if (!unpack_nameX(e, AA_STRUCTEND, NULL)) {
1170 info = "failed to unpack end of profile";
/* common failure path: audit with accumulated @info, free the profile */
1181 audit_iface(profile, NULL, name, info, e, error);
1182 aa_free_profile(profile);
1184 return ERR_PTR(error);
1188 * verify_header - unpack serialized stream header
1189 * @e: serialized data read head (NOT NULL)
1190 * @required: whether the header is required or optional
1191 * @ns: Returns - namespace if one is specified else NULL (NOT NULL)
1193 * Returns: error or 0 if header is good
1195 static int verify_header(struct aa_ext *e, int required, const char **ns)
1197 int error = -EPROTONOSUPPORT;
1198 const char *name = NULL;
1201 /* get the interface version */
1202 if (!unpack_u32(e, &e->version, "version")) {
1204 audit_iface(NULL, NULL, NULL, "invalid profile format",
1210 /* Check that the interface version is currently supported.
1211 * if not specified use previous version
1212 * Mask off everything that is not kernel abi version
/* VERSION_LT/GT compare only the K_ABI_MASK bits; flag bits ignored */
1214 if (VERSION_LT(e->version, v5) || VERSION_GT(e->version, v9)) {
1215 audit_iface(NULL, NULL, NULL, "unsupported interface version",
1220 /* read the namespace if present */
1221 if (unpack_str(e, &name, "namespace")) {
1222 if (*name == '\0') {
1223 audit_iface(NULL, NULL, NULL, "invalid namespace name",
/* all profiles in one load must target the same namespace */
1227 if (*ns && strcmp(*ns, name)) {
1228 audit_iface(NULL, NULL, NULL, "invalid ns change", e,
1231 *ns = kstrdup(name, GFP_KERNEL);
/* Validate a single transition index: if it is a table reference, the
 * index must fall inside the transition table.
 */
1240 static bool verify_xindex(int xindex, int table_size)
1243 xtype = xindex & AA_X_TYPE_MASK;
1244 index = xindex & AA_X_INDEX_MASK;
1245 if (xtype == AA_X_TABLE && index >= table_size)
1250 /* verify dfa xindexes are in range of transition tables */
1251 static bool verify_dfa_xindex(struct aa_dfa *dfa, int table_size)
1254 for (i = 0; i < dfa->tables[YYTD_ID_ACCEPT]->td_lolen; i++) {
1255 if (!verify_xindex(ACCEPT_TABLE(dfa)[i], table_size))
1262 * verify_profile - Do post unpack analysis to verify profile consistency
1263 * @profile: profile to verify (NOT NULL)
1265 * Returns: 0 if passes verification else error
1267 * This verification is post any unpack mapping or changes
1269 static int verify_profile(struct aa_profile *profile)
/* every transition index in either dfa must stay inside its table */
1271 if ((profile->file.dfa &&
1272 !verify_dfa_xindex(profile->file.dfa,
1273 profile->file.trans.size)) ||
1274 (profile->policy.dfa &&
1275 !verify_dfa_xindex(profile->policy.dfa,
1276 profile->policy.trans.size))) {
1277 audit_iface(profile, NULL, NULL,
1278 "Unpack: Invalid named transition", NULL, -EPROTO);
/* Free a load entry: drop the profile references it holds and release
 * the entry itself (zeroized, since it tracked policy).
 */
1285 void aa_load_ent_free(struct aa_load_ent *ent)
1288 aa_put_profile(ent->rename);
1289 aa_put_profile(ent->old);
1290 aa_put_profile(ent->new);
1291 kfree(ent->ns_name);
1292 kfree_sensitive(ent);
/* Allocate a zeroed load entry with an initialized list head. */
1296 struct aa_load_ent *aa_load_ent_alloc(void)
1298 struct aa_load_ent *ent = kzalloc(sizeof(*ent), GFP_KERNEL);
1300 INIT_LIST_HEAD(&ent->list);
/* Compress @slen bytes of @src with zstd into a freshly allocated buffer
 * returned via @dst (length via @dlen). Only built when binary policy
 * export is enabled. Uses a staging buffer sized to the worst case and
 * shrinks/copies it down to the actual compressed size.
 */
1304 static int compress_zstd(const char *src, size_t slen, char **dst, size_t *dlen)
1306 #ifdef CONFIG_SECURITY_APPARMOR_EXPORT_BINARY
1307 const zstd_parameters params =
1308 zstd_get_params(aa_g_rawdata_compression_level, slen);
1309 const size_t wksp_len = zstd_cctx_workspace_bound(&params.cParams);
1311 zstd_cctx *ctx = NULL;
1312 size_t out_len = zstd_compress_bound(slen);
1316 out = kvzalloc(out_len, GFP_KERNEL);
1322 wksp = kvzalloc(wksp_len, GFP_KERNEL);
1328 ctx = zstd_init_cctx(wksp, wksp_len);
1334 out_len = zstd_compress_cctx(ctx, out, out_len, src, slen, &params);
1335 if (zstd_is_error(out_len)) {
/* vmalloc'd staging buffer cannot be krealloc'd; copy instead */
1340 if (is_vmalloc_addr(out)) {
1341 *dst = kvzalloc(out_len, GFP_KERNEL);
1343 memcpy(*dst, out, out_len);
1349 * If the staging buffer was kmalloc'd, then using krealloc is
1350 * probably going to be faster. The destination buffer will
1351 * always be smaller, so it's just shrunk, avoiding a memcpy
1353 *dst = krealloc(out, out_len, GFP_KERNEL);
/* Compress a loaddata blob in place (data pointer is swapped for the
 * compressed buffer and the old one freed). With compression disabled,
 * just records compressed_size == size so aa_rawdata_eq() and readers
 * treat the blob as uncompressed.
 */
1377 static int compress_loaddata(struct aa_loaddata *data)
/* must not already be compressed */
1379 AA_BUG(data->compressed_size > 0);
1382 * Shortcut the no compression case, else we increase the amount of
1383 * storage required by a small amount
1385 if (aa_g_rawdata_compression_level != 0) {
1386 void *udata = data->data;
1387 int error = compress_zstd(udata, data->size, &data->data,
1388 &data->compressed_size);
/* free the uncompressed copy only if it was actually replaced */
1392 if (udata != data->data)
1395 data->compressed_size = data->size;
1401 * aa_unpack - unpack packed binary profile(s) data loaded from user space
1402 * @udata: user data copied to kmem (NOT NULL)
1403 * @lh: list to place unpacked profiles in a aa_repl_ws
1404 * @ns: Returns namespace profile is in if specified else NULL (NOT NULL)
1406 * Unpack user data and return refcounted allocated profile(s) stored in
1407 * @lh in order of discovery, with the list chain stored in base.list
1410 * Returns: profile(s) on @lh else error pointer if fails to unpack
1412 int aa_unpack(struct aa_loaddata *udata, struct list_head *lh,
1415 struct aa_load_ent *tmp, *ent;
1416 struct aa_profile *profile = NULL;
1419 .start = udata->data,
1420 .end = udata->data + udata->size,
/* a single load blob may contain several concatenated profiles */
1425 while (e.pos < e.end) {
1426 char *ns_name = NULL;
/* header is only required for the first profile in the stream */
1428 error = verify_header(&e, e.pos == e.start, ns);
1433 profile = unpack_profile(&e, &ns_name);
1434 if (IS_ERR(profile)) {
1435 error = PTR_ERR(profile);
1439 error = verify_profile(profile);
1443 if (aa_g_hash_policy)
1444 error = aa_calc_profile_hash(profile, e.version, start,
1449 ent = aa_load_ent_alloc();
1456 ent->ns_name = ns_name;
1457 list_add_tail(&ent->list, lh);
1459 udata->abi = e.version & K_ABI_MASK;
1460 if (aa_g_hash_policy) {
1461 udata->hash = aa_calc_hash(udata->data, udata->size);
1462 if (IS_ERR(udata->hash)) {
1463 error = PTR_ERR(udata->hash);
/* compress only when the raw blob will be re-exported via apparmorfs */
1469 if (aa_g_export_binary) {
1470 error = compress_loaddata(udata);
1477 aa_put_profile(profile);
/* error path: unwind every entry already queued on @lh */
1480 list_for_each_entry_safe(ent, tmp, lh, list) {
1481 list_del_init(&ent->list);
1482 aa_load_ent_free(ent);
1488 #ifdef CONFIG_SECURITY_APPARMOR_KUNIT_TEST
1489 #include "policy_unpack_test.c"
1490 #endif /* CONFIG_SECURITY_APPARMOR_KUNIT_TEST */