1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
4 * Copyright (c) 2013 Red Hat, Inc.
9 #include "xfs_shared.h"
10 #include "xfs_format.h"
11 #include "xfs_log_format.h"
12 #include "xfs_trans_resv.h"
14 #include "xfs_mount.h"
15 #include "xfs_defer.h"
16 #include "xfs_da_format.h"
17 #include "xfs_da_btree.h"
18 #include "xfs_inode.h"
19 #include "xfs_trans.h"
22 #include "xfs_attr_remote.h"
23 #include "xfs_trace.h"
24 #include "xfs_error.h"
26 #define ATTR_RMTVALUE_MAPSIZE 1 /* # of map entries at once */
29 * Remote Attribute Values
30 * =======================
32 * Remote extended attribute values are conceptually simple -- they're written
33 * to data blocks mapped by an inode's attribute fork, and they have an upper
34 * size limit of 64k. Setting a value does not involve the XFS log.
36 * However, on a v5 filesystem, maximally sized remote attr values require one
37 * block more than 64k worth of space to hold both the remote attribute value
38 * header (64 bytes) and the value itself. On a 4k block filesystem this results in a 68k buffer;
39 * on a 64k block filesystem, this would be a 128k buffer. Note that the log
40 * format can only handle a dirty buffer of XFS_MAX_BLOCKSIZE length (64k).
41 * Therefore, we /must/ ensure that remote attribute value buffers never touch
42 * the logging system and therefore never have a log item.
46 * Each contiguous block has a header, so it is not just a simple attribute
47 * length to FSB conversion.
54 if (xfs_has_crc(mp)) {
55 int buflen = XFS_ATTR3_RMT_BUF_SPACE(mp, mp->m_sb.sb_blocksize);
56 return (attrlen + buflen - 1) / buflen;
58 return XFS_B_TO_FSB(mp, attrlen);
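/*
 * Illustrative worked example of the conversion above (numbers only, assuming
 * 4k filesystem blocks and the 64 byte header figure quoted at the top of
 * this file): XFS_ATTR3_RMT_BUF_SPACE() leaves roughly 4096 - 64 = 4032
 * usable bytes per block, so a maximally sized 64k value needs
 *
 *	(65536 + 4032 - 1) / 4032 = 17 blocks, i.e. a 68k buffer,
 *
 * which matches the 68k figure in the comment above. Without CRCs there is
 * no per-block header and XFS_B_TO_FSB() gives the plain 16 block (64k)
 * answer.
 */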
62 * Checking of the remote attribute header is split into two parts. The verifier
63 * does CRC, location and bounds checking; the unpacking function checks the
64 * attribute parameters and owner.
74 struct xfs_attr3_rmt_hdr *rmt = ptr;
76 if (bno != be64_to_cpu(rmt->rm_blkno))
77 return __this_address;
78 if (offset != be32_to_cpu(rmt->rm_offset))
79 return __this_address;
80 if (size != be32_to_cpu(rmt->rm_bytes))
81 return __this_address;
82 if (ino != be64_to_cpu(rmt->rm_owner))
83 return __this_address;
97 struct xfs_attr3_rmt_hdr *rmt = ptr;
99 if (!xfs_verify_magic(bp, rmt->rm_magic))
100 return __this_address;
101 if (!uuid_equal(&rmt->rm_uuid, &mp->m_sb.sb_meta_uuid))
102 return __this_address;
103 if (be64_to_cpu(rmt->rm_blkno) != bno)
104 return __this_address;
105 if (be32_to_cpu(rmt->rm_bytes) > fsbsize - sizeof(*rmt))
106 return __this_address;
107 if (be32_to_cpu(rmt->rm_offset) +
108 be32_to_cpu(rmt->rm_bytes) > XFS_XATTR_SIZE_MAX)
109 return __this_address;
110 if (rmt->rm_owner == 0)
111 return __this_address;
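/*
 * Putting the two parts together (a sketch of the read path, not a new
 * mechanism): the buffer ops vector runs xfs_attr3_rmt_verify() on each
 * block as the buffer moves through the buffer cache, e.g.
 *
 *	xfs_buf_read(mp->m_ddev_targp, dblkno, dblkcnt, 0, &bp,
 *		     &xfs_attr3_rmt_buf_ops);
 *
 * while xfs_attr3_rmt_hdr_ok() is called later, from
 * xfs_attr_rmtval_copyout(), with the owning inode and the expected offset
 * and length of the specific attribute being read.
 */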
117 __xfs_attr3_rmt_read_verify(
120 xfs_failaddr_t *failaddr)
122 struct xfs_mount *mp = bp->b_mount;
126 int blksize = mp->m_attr_geo->blksize;
128 /* no verification of non-crc buffers */
129 if (!xfs_has_crc(mp))
133 bno = xfs_buf_daddr(bp);
134 len = BBTOB(bp->b_length);
135 ASSERT(len >= blksize);
139 !xfs_verify_cksum(ptr, blksize, XFS_ATTR3_RMT_CRC_OFF)) {
140 *failaddr = __this_address;
143 *failaddr = xfs_attr3_rmt_verify(mp, bp, ptr, blksize, bno);
145 return -EFSCORRUPTED;
148 bno += BTOBB(blksize);
152 *failaddr = __this_address;
153 return -EFSCORRUPTED;
160 xfs_attr3_rmt_read_verify(
166 error = __xfs_attr3_rmt_read_verify(bp, true, &fa);
168 xfs_verifier_error(bp, error, fa);
171 static xfs_failaddr_t
172 xfs_attr3_rmt_verify_struct(
178 error = __xfs_attr3_rmt_read_verify(bp, false, &fa);
179 return error ? fa : NULL;
183 xfs_attr3_rmt_write_verify(
186 struct xfs_mount *mp = bp->b_mount;
188 int blksize = mp->m_attr_geo->blksize;
193 /* no verification of non-crc buffers */
194 if (!xfs_has_crc(mp))
198 bno = xfs_buf_daddr(bp);
199 len = BBTOB(bp->b_length);
200 ASSERT(len >= blksize);
203 struct xfs_attr3_rmt_hdr *rmt = (struct xfs_attr3_rmt_hdr *)ptr;
205 fa = xfs_attr3_rmt_verify(mp, bp, ptr, blksize, bno);
207 xfs_verifier_error(bp, -EFSCORRUPTED, fa);
212 * Ensure we aren't writing bogus LSNs to disk. See
213 * xfs_attr3_rmt_hdr_set() for the explanation.
215 if (rmt->rm_lsn != cpu_to_be64(NULLCOMMITLSN)) {
216 xfs_verifier_error(bp, -EFSCORRUPTED, __this_address);
219 xfs_update_cksum(ptr, blksize, XFS_ATTR3_RMT_CRC_OFF);
223 bno += BTOBB(blksize);
227 xfs_verifier_error(bp, -EFSCORRUPTED, __this_address);
230 const struct xfs_buf_ops xfs_attr3_rmt_buf_ops = {
231 .name = "xfs_attr3_rmt",
232 .magic = { 0, cpu_to_be32(XFS_ATTR3_RMT_MAGIC) },
233 .verify_read = xfs_attr3_rmt_read_verify,
234 .verify_write = xfs_attr3_rmt_write_verify,
235 .verify_struct = xfs_attr3_rmt_verify_struct,
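/*
 * Usage note (sketch only): readers pass this ops vector directly to
 * xfs_buf_read(), as xfs_attr_rmtval_get() does below, so the read verifier
 * runs when the buffer comes off disk. Writers build the buffer with
 * xfs_buf_get() and attach the ops by hand before issuing the write,
 * roughly:
 *
 *	bp->b_ops = &xfs_attr3_rmt_buf_ops;
 *	xfs_attr_rmtval_copyin(...);
 *	error = xfs_bwrite(bp);
 *
 * which is what xfs_attr_rmtval_set_value() does further down.
 */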
239 xfs_attr3_rmt_hdr_set(
240 struct xfs_mount *mp,
247 struct xfs_attr3_rmt_hdr *rmt = ptr;
249 if (!xfs_has_crc(mp))
252 rmt->rm_magic = cpu_to_be32(XFS_ATTR3_RMT_MAGIC);
253 rmt->rm_offset = cpu_to_be32(offset);
254 rmt->rm_bytes = cpu_to_be32(size);
255 uuid_copy(&rmt->rm_uuid, &mp->m_sb.sb_meta_uuid);
256 rmt->rm_owner = cpu_to_be64(ino);
257 rmt->rm_blkno = cpu_to_be64(bno);
260 * Remote attribute blocks are written synchronously, so we don't
261 * have an LSN that we can stamp in them that makes any sense to log
262 * recovery. To ensure that log recovery handles overwrites of these
263 * blocks sanely (i.e. once they've been freed and reallocated as some
264 * other type of metadata) we need to ensure that the LSN has a value
265 * that tells log recovery to ignore the LSN and overwrite the buffer
266 * with whatever is in the log. To do this, we use the magic
267 * NULLCOMMITLSN to indicate that the LSN is invalid.
269 rmt->rm_lsn = cpu_to_be64(NULLCOMMITLSN);
271 return sizeof(struct xfs_attr3_rmt_hdr);
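/*
 * Layout sketch for each remote value block on a v5 filesystem (for
 * orientation only; the exact packing of struct xfs_attr3_rmt_hdr lives in
 * xfs_da_format.h):
 *
 *	[ struct xfs_attr3_rmt_hdr: rm_magic, rm_offset, rm_bytes, CRC,
 *	  rm_uuid, rm_owner, rm_blkno, rm_lsn = NULLCOMMITLSN ]
 *	[ up to XFS_ATTR3_RMT_BUF_SPACE(mp, blksize) bytes of value data ]
 *
 * Every block is therefore self-describing: it records the owning inode,
 * its offset within the value, and the disk address it was written for,
 * which is exactly what xfs_attr3_rmt_verify() and xfs_attr3_rmt_hdr_ok()
 * check above.
 */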
275 * Helper functions to copy attribute data in and out of the on-disk extents
278 xfs_attr_rmtval_copyout(
279 struct xfs_mount *mp,
286 char *src = bp->b_addr;
287 xfs_daddr_t bno = xfs_buf_daddr(bp);
288 int len = BBTOB(bp->b_length);
289 int blksize = mp->m_attr_geo->blksize;
291 ASSERT(len >= blksize);
293 while (len > 0 && *valuelen > 0) {
295 int byte_cnt = XFS_ATTR3_RMT_BUF_SPACE(mp, blksize);
297 byte_cnt = min(*valuelen, byte_cnt);
299 if (xfs_has_crc(mp)) {
300 if (xfs_attr3_rmt_hdr_ok(src, ino, *offset,
303 "remote attribute header mismatch bno/off/len/owner (0x%llx/0x%x/0x%x/0x%llx)",
304 bno, *offset, byte_cnt, ino);
305 return -EFSCORRUPTED;
307 hdr_size = sizeof(struct xfs_attr3_rmt_hdr);
310 memcpy(*dst, src + hdr_size, byte_cnt);
312 /* roll buffer forwards */
315 bno += BTOBB(blksize);
317 /* roll attribute data forwards */
318 *valuelen -= byte_cnt;
326 xfs_attr_rmtval_copyin(
327 struct xfs_mount *mp,
334 char *dst = bp->b_addr;
335 xfs_daddr_t bno = xfs_buf_daddr(bp);
336 int len = BBTOB(bp->b_length);
337 int blksize = mp->m_attr_geo->blksize;
339 ASSERT(len >= blksize);
341 while (len > 0 && *valuelen > 0) {
343 int byte_cnt = XFS_ATTR3_RMT_BUF_SPACE(mp, blksize);
345 byte_cnt = min(*valuelen, byte_cnt);
346 hdr_size = xfs_attr3_rmt_hdr_set(mp, dst, ino, *offset,
349 memcpy(dst + hdr_size, *src, byte_cnt);
352 * If this is the last block, zero the remainder of it.
353 * Check that this is actually the last block, too.
355 if (byte_cnt + hdr_size < blksize) {
356 ASSERT(*valuelen - byte_cnt == 0);
357 ASSERT(len == blksize);
358 memset(dst + hdr_size + byte_cnt, 0,
359 blksize - hdr_size - byte_cnt);
362 /* roll buffer forwards */
365 bno += BTOBB(blksize);
367 /* roll attribute data forwards */
368 *valuelen -= byte_cnt;
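/*
 * Worked example of the split performed by the two helpers above (numbers
 * are illustrative, assuming 4k blocks and the 64 byte header described at
 * the top of the file, i.e. roughly 4032 usable bytes per block): a
 * 10000 byte value is copied in as
 *
 *	block 0: rm_offset = 0,    rm_bytes = 4032
 *	block 1: rm_offset = 4032, rm_bytes = 4032
 *	block 2: rm_offset = 8064, rm_bytes = 1936, tail zeroed by the
 *		 memset() above
 *
 * and xfs_attr_rmtval_copyout() reassembles the same three pieces, checking
 * each header against the expected owner/offset/length as it goes.
 */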
375 * Read the value associated with an attribute from the out-of-line buffer
376 * that we stored it in.
378 * Returns 0 on successful retrieval, otherwise an error.
382 struct xfs_da_args *args)
384 struct xfs_bmbt_irec map[ATTR_RMTVALUE_MAPSIZE];
385 struct xfs_mount *mp = args->dp->i_mount;
387 xfs_dablk_t lblkno = args->rmtblkno;
388 uint8_t *dst = args->value;
392 int blkcnt = args->rmtblkcnt;
396 trace_xfs_attr_rmtval_get(args);
398 ASSERT(args->valuelen != 0);
399 ASSERT(args->rmtvaluelen == args->valuelen);
401 valuelen = args->rmtvaluelen;
402 while (valuelen > 0) {
403 nmap = ATTR_RMTVALUE_MAPSIZE;
404 error = xfs_bmapi_read(args->dp, (xfs_fileoff_t)lblkno,
411 for (i = 0; (i < nmap) && (valuelen > 0); i++) {
415 ASSERT((map[i].br_startblock != DELAYSTARTBLOCK) &&
416 (map[i].br_startblock != HOLESTARTBLOCK));
417 dblkno = XFS_FSB_TO_DADDR(mp, map[i].br_startblock);
418 dblkcnt = XFS_FSB_TO_BB(mp, map[i].br_blockcount);
419 error = xfs_buf_read(mp->m_ddev_targp, dblkno, dblkcnt,
420 0, &bp, &xfs_attr3_rmt_buf_ops);
424 error = xfs_attr_rmtval_copyout(mp, bp, args->dp->i_ino,
431 /* roll attribute extent map forwards */
432 lblkno += map[i].br_blockcount;
433 blkcnt -= map[i].br_blockcount;
436 ASSERT(valuelen == 0);
441 * Find a "hole" in the attribute address space large enough for us to drop the
442 * new attribute's value into.
445 xfs_attr_rmt_find_hole(
446 struct xfs_da_args *args)
448 struct xfs_inode *dp = args->dp;
449 struct xfs_mount *mp = dp->i_mount;
452 xfs_fileoff_t lfileoff = 0;
455 * Because CRC enabled attributes have headers, we can't just do a
456 * straight byte to FSB conversion and have to take the header space into account.
459 blkcnt = xfs_attr3_rmt_blocks(mp, args->rmtvaluelen);
460 error = xfs_bmap_first_unused(args->trans, args->dp, blkcnt, &lfileoff,
465 args->rmtblkno = (xfs_dablk_t)lfileoff;
466 args->rmtblkcnt = blkcnt;
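/*
 * Example (illustrative numbers only): for args->rmtvaluelen = 10000 on a
 * 4k block v5 filesystem, xfs_attr3_rmt_blocks() returns 3, and
 * xfs_bmap_first_unused() looks for the first run of 3 unmapped logical
 * blocks in the attribute fork's address space, starting from offset 0.
 * That logical offset and length are parked in args->rmtblkno and
 * args->rmtblkcnt for the allocation and write steps that follow.
 */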
472 xfs_attr_rmtval_set_value(
473 struct xfs_da_args *args)
475 struct xfs_inode *dp = args->dp;
476 struct xfs_mount *mp = dp->i_mount;
477 struct xfs_bmbt_irec map;
479 uint8_t *src = args->value;
487 * Roll through the "value", copying the attribute value to the
488 * already-allocated blocks. Blocks are written synchronously
489 * so that we can know they are all on disk before we turn off
490 * the INCOMPLETE flag.
492 lblkno = args->rmtblkno;
493 blkcnt = args->rmtblkcnt;
494 valuelen = args->rmtvaluelen;
495 while (valuelen > 0) {
503 error = xfs_bmapi_read(dp, (xfs_fileoff_t)lblkno,
509 ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
510 (map.br_startblock != HOLESTARTBLOCK));
512 dblkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);
513 dblkcnt = XFS_FSB_TO_BB(mp, map.br_blockcount);
515 error = xfs_buf_get(mp->m_ddev_targp, dblkno, dblkcnt, &bp);
518 bp->b_ops = &xfs_attr3_rmt_buf_ops;
520 xfs_attr_rmtval_copyin(mp, bp, args->dp->i_ino, &offset,
523 error = xfs_bwrite(bp); /* synchronous write */
529 /* roll attribute extent map forwards */
530 lblkno += map.br_blockcount;
531 blkcnt -= map.br_blockcount;
533 ASSERT(valuelen == 0);
537 /* Mark stale any incore buffers for the remote value. */
539 xfs_attr_rmtval_stale(
540 struct xfs_inode *ip,
541 struct xfs_bmbt_irec *map,
542 xfs_buf_flags_t incore_flags)
544 struct xfs_mount *mp = ip->i_mount;
548 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
550 if (XFS_IS_CORRUPT(mp, map->br_startblock == DELAYSTARTBLOCK) ||
551 XFS_IS_CORRUPT(mp, map->br_startblock == HOLESTARTBLOCK))
552 return -EFSCORRUPTED;
554 error = xfs_buf_incore(mp->m_ddev_targp,
555 XFS_FSB_TO_DADDR(mp, map->br_startblock),
556 XFS_FSB_TO_BB(mp, map->br_blockcount),
559 if (error == -ENOENT)
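/*
 * A note on the lookup above (sketch, not a spec): -ENOENT just means no
 * buffer for that range is currently cached, so there is nothing to
 * invalidate and the helper can return success. A buffer that is found is
 * marked stale and released, roughly xfs_buf_stale(bp); xfs_buf_relse(bp);,
 * so that a later reallocation of those blocks as some other metadata
 * cannot trip over stale cached remote-value contents.
 */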
570 * Find a hole for the attr and store it in the delayed attr context. This
571 * initializes the context to roll through allocating an attr extent for a
572 * delayed attr operation
575 xfs_attr_rmtval_find_space(
576 struct xfs_attr_intent *attr)
578 struct xfs_da_args *args = attr->xattri_da_args;
579 struct xfs_bmbt_irec *map = &attr->xattri_map;
582 attr->xattri_lblkno = 0;
583 attr->xattri_blkcnt = 0;
586 memset(map, 0, sizeof(struct xfs_bmbt_irec));
588 error = xfs_attr_rmt_find_hole(args);
592 attr->xattri_blkcnt = args->rmtblkcnt;
593 attr->xattri_lblkno = args->rmtblkno;
599 * Write one block of the value associated with an attribute into the
600 * out-of-line buffer that we have defined for it. This is similar to a subset
601 * of xfs_attr_rmtval_set, but records the current block to the delayed attr
602 * context, and leaves transaction handling to the caller.
605 xfs_attr_rmtval_set_blk(
606 struct xfs_attr_intent *attr)
608 struct xfs_da_args *args = attr->xattri_da_args;
609 struct xfs_inode *dp = args->dp;
610 struct xfs_bmbt_irec *map = &attr->xattri_map;
615 error = xfs_bmapi_write(args->trans, dp,
616 (xfs_fileoff_t)attr->xattri_lblkno,
617 attr->xattri_blkcnt, XFS_BMAPI_ATTRFORK, args->total,
623 ASSERT((map->br_startblock != DELAYSTARTBLOCK) &&
624 (map->br_startblock != HOLESTARTBLOCK));
626 /* roll attribute extent map forwards */
627 attr->xattri_lblkno += map->br_blockcount;
628 attr->xattri_blkcnt -= map->br_blockcount;
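/*
 * Conceptually, the delayed attr state machine drives the function above
 * one transaction at a time (sketch only; the real caller lives in
 * xfs_attr.c):
 *
 *	error = xfs_attr_rmtval_set_blk(attr);
 *	if (error)
 *		return error;
 *	if (attr->xattri_blkcnt > 0)
 *		return -EAGAIN;	/* roll the transaction, then call again */
 *
 * Each pass maps one extent and advances xattri_lblkno/xattri_blkcnt, so
 * repeated calls walk across the whole remote value range found by
 * xfs_attr_rmtval_find_space().
 */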
634 * Remove the value associated with an attribute by deleting the
635 * out-of-line buffer that it is stored on.
638 xfs_attr_rmtval_invalidate(
639 struct xfs_da_args *args)
646 * Roll through the "value", invalidating the attribute value's blocks.
648 lblkno = args->rmtblkno;
649 blkcnt = args->rmtblkcnt;
651 struct xfs_bmbt_irec map;
655 * Try to remember where we decided to put the value.
658 error = xfs_bmapi_read(args->dp, (xfs_fileoff_t)lblkno,
659 blkcnt, &map, &nmap, XFS_BMAPI_ATTRFORK);
662 if (XFS_IS_CORRUPT(args->dp->i_mount, nmap != 1))
663 return -EFSCORRUPTED;
664 error = xfs_attr_rmtval_stale(args->dp, &map, XBF_TRYLOCK);
668 lblkno += map.br_blockcount;
669 blkcnt -= map.br_blockcount;
675 * Remove the value associated with an attribute by deleting the out-of-line
676 * buffer that it is stored on. Returns -EAGAIN for the caller to refresh the
677 * transaction and re-call the function. Callers should keep calling this
678 * routine until it returns something other than -EAGAIN.
681 xfs_attr_rmtval_remove(
682 struct xfs_attr_intent *attr)
684 struct xfs_da_args *args = attr->xattri_da_args;
688 * Unmap value blocks for this attr.
690 error = xfs_bunmapi(args->trans, args->dp, args->rmtblkno,
691 args->rmtblkcnt, XFS_BMAPI_ATTRFORK, 1, &done);
696 * We don't need an explicit state here to pick up where we left off. We
697 * can figure it out using the !done return code. The actual value of
698 * attr->xattri_dela_state may be some value reminiscent of the calling
699 * function, but its value is irrelevant within the context of this
700 * function. Once we are done here, the next state is set as needed by
704 trace_xfs_attr_rmtval_remove_return(attr->xattri_dela_state,
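/*
 * Caller's view of the -EAGAIN protocol described above (illustrative
 * sketch only; the real caller is the delayed attr state machine, which
 * rolls args->trans between calls):
 *
 *	do {
 *		error = xfs_attr_rmtval_remove(attr);
 *		if (error != -EAGAIN)
 *			break;
 *		// refresh/roll the transaction here, then try again
 *	} while (1);
 *
 * Each pass unmaps as much of the remote value range as the current
 * transaction allows; the !done result from xfs_bunmapi() is what turns
 * into -EAGAIN.
 */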