// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Copyright (C) 2021-2022, Alibaba Cloud
 */
#include <linux/security.h>
#include "xattr.h"

struct xattr_iter {
	struct super_block *sb;
	struct erofs_buf buf;
	void *kaddr;
	erofs_blk_t blkaddr;
	unsigned int ofs;
};
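
/*
 * Rough sketch of how the iterator above is used: @buf pins the metadata
 * block currently mapped at @kaddr, while @blkaddr/@ofs track the walk
 * position inside the xattr area. Callers advance @ofs and rely on
 * xattr_iter_fixup() below to remap whenever a block boundary is crossed.
 */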
static int init_inode_xattrs(struct inode *inode)
{
	struct erofs_inode *const vi = EROFS_I(inode);
	struct xattr_iter it;
	unsigned int i;
	struct erofs_xattr_ibody_header *ih;
	struct super_block *sb = inode->i_sb;
	int ret = 0;

	/* the common case: xattrs of this inode have already been initialized */
	if (test_bit(EROFS_I_EA_INITED_BIT, &vi->flags)) {
		/*
		 * paired with smp_mb() at the end of the function to ensure
		 * fields will only be observed after the bit is set.
		 */
		smp_mb();
		return 0;
	}

	if (wait_on_bit_lock(&vi->flags, EROFS_I_BL_XATTR_BIT, TASK_KILLABLE))
		return -ERESTARTSYS;

	/* someone has initialized xattrs for us? */
	if (test_bit(EROFS_I_EA_INITED_BIT, &vi->flags))
		goto out_unlock;
	/*
	 * bypass all xattr operations if ->xattr_isize is not greater than
	 * sizeof(struct erofs_xattr_ibody_header), in detail:
	 * 1) it is not enough to contain erofs_xattr_ibody_header then
	 *    ->xattr_isize should be 0 (it means no xattr);
	 * 2) it is just to contain erofs_xattr_ibody_header, which is on-disk
	 *    undefined right now (maybe use later with some new sb feature).
	 */
	if (vi->xattr_isize == sizeof(struct erofs_xattr_ibody_header)) {
		erofs_err(sb,
			  "xattr_isize %d of nid %llu is not supported yet",
			  vi->xattr_isize, vi->nid);
		ret = -EOPNOTSUPP;
		goto out_unlock;
	} else if (vi->xattr_isize < sizeof(struct erofs_xattr_ibody_header)) {
		if (vi->xattr_isize) {
			erofs_err(sb, "bogus xattr ibody @ nid %llu", vi->nid);
			DBG_BUGON(1);
			ret = -EFSCORRUPTED;
			goto out_unlock;	/* xattr ondisk layout error */
		}
		ret = -ENOATTR;
		goto out_unlock;
	}
	it.buf = __EROFS_BUF_INITIALIZER;
	it.blkaddr = erofs_blknr(erofs_iloc(inode) + vi->inode_isize);
	it.ofs = erofs_blkoff(erofs_iloc(inode) + vi->inode_isize);

	/* read in shared xattr array (non-atomic, see kmalloc below) */
	it.kaddr = erofs_read_metabuf(&it.buf, sb, it.blkaddr, EROFS_KMAP);
	if (IS_ERR(it.kaddr)) {
		ret = PTR_ERR(it.kaddr);
		goto out_unlock;
	}

	ih = (struct erofs_xattr_ibody_header *)(it.kaddr + it.ofs);
	vi->xattr_shared_count = ih->h_shared_count;
	vi->xattr_shared_xattrs = kmalloc_array(vi->xattr_shared_count,
						sizeof(uint), GFP_KERNEL);
	if (!vi->xattr_shared_xattrs) {
		erofs_put_metabuf(&it.buf);
		ret = -ENOMEM;
		goto out_unlock;
	}

	/* let's skip ibody header */
	it.ofs += sizeof(struct erofs_xattr_ibody_header);
	for (i = 0; i < vi->xattr_shared_count; ++i) {
		if (it.ofs >= EROFS_BLKSIZ) {
			/* cannot be unaligned */
			DBG_BUGON(it.ofs != EROFS_BLKSIZ);

			it.kaddr = erofs_read_metabuf(&it.buf, sb, ++it.blkaddr,
						      EROFS_KMAP);
			if (IS_ERR(it.kaddr)) {
				kfree(vi->xattr_shared_xattrs);
				vi->xattr_shared_xattrs = NULL;
				ret = PTR_ERR(it.kaddr);
				goto out_unlock;
			}
			it.ofs = 0;
		}
		vi->xattr_shared_xattrs[i] =
			le32_to_cpu(*(__le32 *)(it.kaddr + it.ofs));
		it.ofs += sizeof(__le32);
	}
	erofs_put_metabuf(&it.buf);

	/* paired with smp_mb() at the beginning of the function. */
	smp_mb();
	set_bit(EROFS_I_EA_INITED_BIT, &vi->flags);

out_unlock:
	clear_and_wake_up_bit(EROFS_I_BL_XATTR_BIT, &vi->flags);
	return ret;
}
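
/*
 * Rough sketch of the on-disk ibody that init_inode_xattrs() walks above
 * (see erofs_fs.h for the authoritative definitions):
 *
 *	erofs inode base		(vi->inode_isize bytes)
 *	struct erofs_xattr_ibody_header	(carries h_shared_count)
 *	__le32 shared xattr ids		(h_shared_count entries, cached
 *					 into vi->xattr_shared_xattrs)
 *	inline struct erofs_xattr_entry records up to vi->xattr_isize
 */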
/*
 * The general idea of these return values:
 *    0 is returned: go on processing the current xattr;
 *    1 (> 0) is returned: skip this round and process the next xattr;
 * -err (< 0) is returned: an error (e.g. -ENOATTR) occurred and needs
 *                         to be handled.
 */
struct xattr_iter_handlers {
	int (*entry)(struct xattr_iter *_it, struct erofs_xattr_entry *entry);
	int (*name)(struct xattr_iter *_it, unsigned int processed, char *buf,
		    unsigned int len);
	int (*alloc_buffer)(struct xattr_iter *_it, unsigned int value_sz);
	void (*value)(struct xattr_iter *_it, unsigned int processed, char *buf,
		      unsigned int len);
};
static inline int xattr_iter_fixup(struct xattr_iter *it)
{
	if (it->ofs < EROFS_BLKSIZ)
		return 0;

	it->blkaddr += erofs_blknr(it->ofs);
	it->kaddr = erofs_read_metabuf(&it->buf, it->sb, it->blkaddr,
				       EROFS_KMAP);
	if (IS_ERR(it->kaddr))
		return PTR_ERR(it->kaddr);
	it->ofs = erofs_blkoff(it->ofs);
	return 0;
}
static int inline_xattr_iter_begin(struct xattr_iter *it,
				   struct inode *inode)
{
	struct erofs_inode *const vi = EROFS_I(inode);
	unsigned int xattr_header_sz, inline_xattr_ofs;

	xattr_header_sz = inlinexattr_header_size(inode);
	if (xattr_header_sz >= vi->xattr_isize) {
		DBG_BUGON(xattr_header_sz > vi->xattr_isize);
		return -ENOATTR;
	}

	inline_xattr_ofs = vi->inode_isize + xattr_header_sz;

	it->blkaddr = erofs_blknr(erofs_iloc(inode) + inline_xattr_ofs);
	it->ofs = erofs_blkoff(erofs_iloc(inode) + inline_xattr_ofs);
	it->kaddr = erofs_read_metabuf(&it->buf, inode->i_sb, it->blkaddr,
				       EROFS_KMAP);
	if (IS_ERR(it->kaddr))
		return PTR_ERR(it->kaddr);
	return vi->xattr_isize - xattr_header_sz;
}
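
/*
 * On success the helper above returns the number of inline xattr bytes
 * left after the header; callers feed that value to xattr_foreach() as
 * @tlimit so iteration stops at the end of the inline area.
 */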
/*
 * Regardless of success or failure, `xattr_foreach' will end up with
 * `ofs' pointing to the next xattr item rather than an arbitrary position.
 */
static int xattr_foreach(struct xattr_iter *it,
			 const struct xattr_iter_handlers *op,
			 unsigned int *tlimit)
{
	struct erofs_xattr_entry entry;
	unsigned int value_sz, processed, slice;
	int err;

	/* 0. fixup blkaddr and ofs so they point into a mapped block */
	err = xattr_iter_fixup(it);
	if (err)
		return err;
	/*
	 * 1. read the xattr entry into memory; entries are EROFS_XATTR_ALIGNed,
	 *    so the fixed-size entry header cannot straddle a block boundary
	 */
	entry = *(struct erofs_xattr_entry *)(it->kaddr + it->ofs);
	if (tlimit) {
		unsigned int entry_sz = erofs_xattr_entry_size(&entry);

		/* xattr on-disk corruption: xattr entry beyond xattr_isize */
		if (*tlimit < entry_sz) {
			DBG_BUGON(1);
			return -EFSCORRUPTED;
		}
		*tlimit -= entry_sz;
	}

	it->ofs += sizeof(struct erofs_xattr_entry);
	value_sz = le16_to_cpu(entry.e_value_size);

	/* handle entry */
	err = op->entry(it, &entry);
	if (err) {
		it->ofs += entry.e_name_len + value_sz;
		goto out;
	}
	/* 2. handle xattr name (ofs will finally be at the end of name) */
	processed = 0;

	while (processed < entry.e_name_len) {
		if (it->ofs >= EROFS_BLKSIZ) {
			DBG_BUGON(it->ofs > EROFS_BLKSIZ);

			err = xattr_iter_fixup(it);
			if (err)
				goto out;
			it->ofs = 0;
		}

		slice = min_t(unsigned int, EROFS_BLKSIZ - it->ofs,
			      entry.e_name_len - processed);

		/* handle name */
		err = op->name(it, processed, it->kaddr + it->ofs, slice);
		if (err) {
			it->ofs += entry.e_name_len - processed + value_sz;
			goto out;
		}

		it->ofs += slice;
		processed += slice;
	}
	/* 3. handle xattr value */
	processed = 0;

	if (op->alloc_buffer) {
		err = op->alloc_buffer(it, value_sz);
		if (err) {
			it->ofs += value_sz;
			goto out;
		}
	}

	while (processed < value_sz) {
		if (it->ofs >= EROFS_BLKSIZ) {
			DBG_BUGON(it->ofs > EROFS_BLKSIZ);

			err = xattr_iter_fixup(it);
			if (err)
				goto out;
			it->ofs = 0;
		}

		slice = min_t(unsigned int, EROFS_BLKSIZ - it->ofs,
			      value_sz - processed);
		op->value(it, processed, it->kaddr + it->ofs, slice);
		it->ofs += slice;
		processed += slice;
	}

out:
	/* xattrs should be 4-byte aligned (on-disk constraint) */
	it->ofs = EROFS_XATTR_ALIGN(it->ofs);
	return err < 0 ? err : 0;
}
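
/*
 * Roughly, each record stepped over by xattr_foreach() looks like this
 * on disk (see struct erofs_xattr_entry in erofs_fs.h):
 *
 *	e_name_len | e_name_index | e_value_size	(4-byte entry header)
 *	name bytes (no trailing NUL)
 *	value bytes
 *	padding up to the next EROFS_XATTR_ALIGN (4-byte) boundary
 */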
struct getxattr_iter {
	struct xattr_iter it;

	char *buffer;
	int buffer_size, index;
	struct qstr name;
};
static int xattr_entrymatch(struct xattr_iter *_it,
			    struct erofs_xattr_entry *entry)
{
	struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);

	return (it->index != entry->e_name_index ||
		it->name.len != entry->e_name_len) ? -ENOATTR : 0;
}

static int xattr_namematch(struct xattr_iter *_it,
			   unsigned int processed, char *buf, unsigned int len)
{
	struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);

	return memcmp(buf, it->name.name + processed, len) ? -ENOATTR : 0;
}
static int xattr_checkbuffer(struct xattr_iter *_it,
			     unsigned int value_sz)
{
	struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
	int err = it->buffer_size < value_sz ? -ERANGE : 0;

	it->buffer_size = value_sz;
	return !it->buffer ? 1 : err;
}

static void xattr_copyvalue(struct xattr_iter *_it,
			    unsigned int processed,
			    char *buf, unsigned int len)
{
	struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);

	memcpy(it->buffer + processed, buf, len);
}
static const struct xattr_iter_handlers find_xattr_handlers = {
	.entry = xattr_entrymatch,
	.name = xattr_namematch,
	.alloc_buffer = xattr_checkbuffer,
	.value = xattr_copyvalue
};
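
/*
 * find_xattr_handlers turns the generic walk into a lookup: the entry and
 * name callbacks return -ENOATTR on any mismatch so xattr_foreach() skips
 * ahead, xattr_checkbuffer() reports the value size (or -ERANGE when the
 * caller's buffer is too small, 1 to skip copying for a NULL buffer), and
 * xattr_copyvalue() fills the destination buffer slice by slice.
 */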
static int inline_getxattr(struct inode *inode, struct getxattr_iter *it)
{
	int ret;
	unsigned int remaining;

	ret = inline_xattr_iter_begin(&it->it, inode);
	if (ret < 0)
		return ret;

	remaining = ret;
	while (remaining) {
		ret = xattr_foreach(&it->it, &find_xattr_handlers, &remaining);
		if (ret != -ENOATTR)
			break;
	}
	return ret ? ret : it->buffer_size;
}
static int shared_getxattr(struct inode *inode, struct getxattr_iter *it)
{
	struct erofs_inode *const vi = EROFS_I(inode);
	struct super_block *const sb = inode->i_sb;
	struct erofs_sb_info *const sbi = EROFS_SB(sb);
	unsigned int i;
	int ret = -ENOATTR;

	for (i = 0; i < vi->xattr_shared_count; ++i) {
		erofs_blk_t blkaddr =
			xattrblock_addr(sbi, vi->xattr_shared_xattrs[i]);

		it->it.ofs = xattrblock_offset(sbi, vi->xattr_shared_xattrs[i]);
		it->it.kaddr = erofs_read_metabuf(&it->it.buf, sb, blkaddr,
						  EROFS_KMAP);
		if (IS_ERR(it->it.kaddr))
			return PTR_ERR(it->it.kaddr);
		it->it.blkaddr = blkaddr;

		ret = xattr_foreach(&it->it, &find_xattr_handlers, NULL);
		if (ret != -ENOATTR)
			break;
	}
	return ret ? ret : it->buffer_size;
}
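
/*
 * Shared xattrs are deduplicated entries stored in a separate metadata
 * area; each 32-bit id cached in vi->xattr_shared_xattrs is translated to
 * a block and offset via xattrblock_addr()/xattrblock_offset() (see
 * xattr.h), so the same find_xattr_handlers walk works there as well.
 */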
static bool erofs_xattr_user_list(struct dentry *dentry)
{
	return test_opt(&EROFS_SB(dentry->d_sb)->opt, XATTR_USER);
}

static bool erofs_xattr_trusted_list(struct dentry *dentry)
{
	return capable(CAP_SYS_ADMIN);
}
int erofs_getxattr(struct inode *inode, int index,
		   const char *name,
		   void *buffer, size_t buffer_size)
{
	int ret;
	struct getxattr_iter it;

	if (!name)
		return -EINVAL;

	ret = init_inode_xattrs(inode);
	if (ret)
		return ret;

	it.index = index;
	it.name.len = strlen(name);
	if (it.name.len > EROFS_NAME_LEN)
		return -ERANGE;

	it.it.buf = __EROFS_BUF_INITIALIZER;
	it.name.name = name;

	it.buffer = buffer;
	it.buffer_size = buffer_size;

	it.it.sb = inode->i_sb;
	ret = inline_getxattr(inode, &it);
	if (ret == -ENOATTR)
		ret = shared_getxattr(inode, &it);
	erofs_put_metabuf(&it.it.buf);
	return ret;
}
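
/*
 * Illustrative use of erofs_getxattr(): passing a NULL buffer makes
 * xattr_checkbuffer() return the value size instead of copying, so a
 * caller may size its buffer first, e.g. (hypothetical caller code):
 *
 *	len = erofs_getxattr(inode, EROFS_XATTR_INDEX_USER, "foo", NULL, 0);
 *	if (len > 0)
 *		len = erofs_getxattr(inode, EROFS_XATTR_INDEX_USER, "foo",
 *				     buf, len);
 *
 * erofs_get_acl() at the end of this file follows the same pattern.
 */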
static int erofs_xattr_generic_get(const struct xattr_handler *handler,
				   struct dentry *unused, struct inode *inode,
				   const char *name, void *buffer, size_t size)
{
	struct erofs_sb_info *const sbi = EROFS_I_SB(inode);

	switch (handler->flags) {
	case EROFS_XATTR_INDEX_USER:
		if (!test_opt(&sbi->opt, XATTR_USER))
			return -EOPNOTSUPP;
		break;
	case EROFS_XATTR_INDEX_TRUSTED:
		break;
	case EROFS_XATTR_INDEX_SECURITY:
		break;
	default:
		return -EINVAL;
	}

	return erofs_getxattr(inode, handler->flags, name, buffer, size);
}
const struct xattr_handler erofs_xattr_user_handler = {
	.prefix = XATTR_USER_PREFIX,
	.flags = EROFS_XATTR_INDEX_USER,
	.list = erofs_xattr_user_list,
	.get = erofs_xattr_generic_get,
};
const struct xattr_handler erofs_xattr_trusted_handler = {
	.prefix = XATTR_TRUSTED_PREFIX,
	.flags = EROFS_XATTR_INDEX_TRUSTED,
	.list = erofs_xattr_trusted_list,
	.get = erofs_xattr_generic_get,
};
#ifdef CONFIG_EROFS_FS_SECURITY
const struct xattr_handler __maybe_unused erofs_xattr_security_handler = {
	.prefix = XATTR_SECURITY_PREFIX,
	.flags = EROFS_XATTR_INDEX_SECURITY,
	.get = erofs_xattr_generic_get,
};
#endif
const struct xattr_handler *erofs_xattr_handlers[] = {
	&erofs_xattr_user_handler,
	&erofs_xattr_trusted_handler,
#ifdef CONFIG_EROFS_FS_SECURITY
	&erofs_xattr_security_handler,
#endif
	NULL,
};
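
/*
 * This NULL-terminated table is what the VFS consults for prefix-based
 * dispatch (it is typically wired up as sb->s_xattr during mount, see
 * super.c); handler->flags carries the on-disk name index that
 * erofs_xattr_generic_get() forwards to erofs_getxattr().
 */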
struct listxattr_iter {
	struct xattr_iter it;

	struct dentry *dentry;
	char *buffer;
	int buffer_size, buffer_ofs;
};
static int xattr_entrylist(struct xattr_iter *_it,
			   struct erofs_xattr_entry *entry)
{
	struct listxattr_iter *it =
		container_of(_it, struct listxattr_iter, it);
	unsigned int prefix_len;
	const char *prefix;

	const struct xattr_handler *h =
		erofs_xattr_handler(entry->e_name_index);

	if (!h || (h->list && !h->list(it->dentry)))
		return 1;

	prefix = xattr_prefix(h);
	prefix_len = strlen(prefix);

	if (!it->buffer) {
		it->buffer_ofs += prefix_len + entry->e_name_len + 1;
		return 1;
	}

	if (it->buffer_ofs + prefix_len
		+ entry->e_name_len + 1 > it->buffer_size)
		return -ERANGE;

	memcpy(it->buffer + it->buffer_ofs, prefix, prefix_len);
	it->buffer_ofs += prefix_len;
	return 0;
}
static int xattr_namelist(struct xattr_iter *_it,
			  unsigned int processed, char *buf, unsigned int len)
{
	struct listxattr_iter *it =
		container_of(_it, struct listxattr_iter, it);

	memcpy(it->buffer + it->buffer_ofs, buf, len);
	it->buffer_ofs += len;
	return 0;
}

static int xattr_skipvalue(struct xattr_iter *_it,
			   unsigned int value_sz)
{
	struct listxattr_iter *it =
		container_of(_it, struct listxattr_iter, it);

	it->buffer[it->buffer_ofs++] = '\0';
	return 1;
}
static const struct xattr_iter_handlers list_xattr_handlers = {
	.entry = xattr_entrylist,
	.name = xattr_namelist,
	.alloc_buffer = xattr_skipvalue,
	.value = NULL
};
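
/*
 * For listing, only names are emitted: xattr_entrylist() writes the
 * "user."/"trusted."/... prefix, xattr_namelist() appends the raw name
 * bytes, and xattr_skipvalue() terminates it with '\0' and returns 1 so
 * the value bytes are skipped. With a NULL buffer only the required size
 * is accumulated in buffer_ofs.
 */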
static int inline_listxattr(struct listxattr_iter *it)
{
	int ret;
	unsigned int remaining;

	ret = inline_xattr_iter_begin(&it->it, d_inode(it->dentry));
	if (ret < 0)
		return ret;

	remaining = ret;
	while (remaining) {
		ret = xattr_foreach(&it->it, &list_xattr_handlers, &remaining);
		if (ret)
			break;
	}
	return ret ? ret : it->buffer_ofs;
}
static int shared_listxattr(struct listxattr_iter *it)
{
	struct inode *const inode = d_inode(it->dentry);
	struct erofs_inode *const vi = EROFS_I(inode);
	struct super_block *const sb = inode->i_sb;
	struct erofs_sb_info *const sbi = EROFS_SB(sb);
	unsigned int i;
	int ret = 0;

	for (i = 0; i < vi->xattr_shared_count; ++i) {
		erofs_blk_t blkaddr =
			xattrblock_addr(sbi, vi->xattr_shared_xattrs[i]);

		it->it.ofs = xattrblock_offset(sbi, vi->xattr_shared_xattrs[i]);
		it->it.kaddr = erofs_read_metabuf(&it->it.buf, sb, blkaddr,
						  EROFS_KMAP);
		if (IS_ERR(it->it.kaddr))
			return PTR_ERR(it->it.kaddr);
		it->it.blkaddr = blkaddr;

		ret = xattr_foreach(&it->it, &list_xattr_handlers, NULL);
		if (ret)
			break;
	}
	return ret ? ret : it->buffer_ofs;
}
ssize_t erofs_listxattr(struct dentry *dentry,
			char *buffer, size_t buffer_size)
{
	int ret;
	struct listxattr_iter it;

	ret = init_inode_xattrs(d_inode(dentry));
	if (ret == -ENOATTR)
		return 0;
	if (ret)
		return ret;

	it.it.buf = __EROFS_BUF_INITIALIZER;
	it.dentry = dentry;
	it.buffer = buffer;
	it.buffer_size = buffer_size;
	it.buffer_ofs = 0;

	it.it.sb = dentry->d_sb;

	ret = inline_listxattr(&it);
	if (ret >= 0 || ret == -ENOATTR)
		ret = shared_listxattr(&it);
	erofs_put_metabuf(&it.it.buf);
	return ret;
}
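
/*
 * erofs_listxattr() concatenates inline results first and shared results
 * second; the final buffer_ofs is the byte count returned to the VFS (or
 * the required size when the buffer is NULL). A -ENOATTR from
 * init_inode_xattrs() simply means "no xattrs" and is reported as 0.
 */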
#ifdef CONFIG_EROFS_FS_POSIX_ACL
struct posix_acl *erofs_get_acl(struct inode *inode, int type, bool rcu)
{
	struct posix_acl *acl;
	int prefix, rc;
	char *value = NULL;

	if (rcu)
		return ERR_PTR(-ECHILD);

	switch (type) {
	case ACL_TYPE_ACCESS:
		prefix = EROFS_XATTR_INDEX_POSIX_ACL_ACCESS;
		break;
	case ACL_TYPE_DEFAULT:
		prefix = EROFS_XATTR_INDEX_POSIX_ACL_DEFAULT;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	rc = erofs_getxattr(inode, prefix, "", NULL, 0);
	if (rc > 0) {
		value = kmalloc(rc, GFP_KERNEL);
		if (!value)
			return ERR_PTR(-ENOMEM);
		rc = erofs_getxattr(inode, prefix, "", value, rc);
	}

	if (rc == -ENOATTR)
		acl = NULL;
	else if (rc < 0)
		acl = ERR_PTR(rc);
	else
		acl = posix_acl_from_xattr(&init_user_ns, value, rc);
	kfree(value);
	return acl;
}
#endif