From 2c3c1048746a4622d8c89a29670120dc8fab93c4 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Sun, 7 Apr 2024 20:49:45 +0200
Subject: Adding upstream version 6.1.76.

Signed-off-by: Daniel Baumann
---
 fs/xfs/scrub/agheader.c        | 930 +++++++++++++++++++++++++++++++++++++++
 fs/xfs/scrub/agheader_repair.c | 947 ++++++++++++++++++++++++++++++++++++++++
 fs/xfs/scrub/alloc.c           | 155 +++++++
 fs/xfs/scrub/attr.c            | 527 ++++++++++++++++++++++
 fs/xfs/scrub/attr.h            |  68 +++
 fs/xfs/scrub/bitmap.c          | 314 ++++++++++++++
 fs/xfs/scrub/bitmap.h          |  37 ++
 fs/xfs/scrub/bmap.c            | 741 +++++++++++++++++++++++++++++++
 fs/xfs/scrub/btree.c           | 748 ++++++++++++++++++++++++++++++++
 fs/xfs/scrub/btree.h           |  62 +++
 fs/xfs/scrub/common.c          | 867 +++++++++++++++++++++++++++++++++++++
 fs/xfs/scrub/common.h          | 152 +++++++
 fs/xfs/scrub/dabtree.c         | 596 +++++++++++++++++++++++++
 fs/xfs/scrub/dabtree.h         |  43 ++
 fs/xfs/scrub/dir.c             | 876 +++++++++++++++++++++++++++++++++++++
 fs/xfs/scrub/fscounters.c      | 381 ++++++++++++++++
 fs/xfs/scrub/health.c          | 233 ++++++++++
 fs/xfs/scrub/health.h          |  14 +
 fs/xfs/scrub/ialloc.c          | 659 ++++++++++++++++++++++++++++
 fs/xfs/scrub/inode.c           | 628 +++++++++++++++++++++++++++
 fs/xfs/scrub/parent.c          | 334 ++++++++++++++
 fs/xfs/scrub/quota.c           | 248 +++++++++++
 fs/xfs/scrub/refcount.c        | 473 ++++++++++++++++++++
 fs/xfs/scrub/repair.c          | 963 +++++++++++++++++++++++++++++++++++++++++
 fs/xfs/scrub/repair.h          |  94 ++++
 fs/xfs/scrub/rmap.c            | 235 ++++++++++
 fs/xfs/scrub/rtbitmap.c        | 193 +++++++++
 fs/xfs/scrub/scrub.c           | 566 ++++++++++++++++++++++++
 fs/xfs/scrub/scrub.h           | 171 ++++++++
 fs/xfs/scrub/symlink.c         |  69 +++
 fs/xfs/scrub/trace.c           |  40 ++
 fs/xfs/scrub/trace.h           | 920 +++++++++++++++++++++++++++++++++++++++
 fs/xfs/scrub/xfs_scrub.h       |  15 +
 33 files changed, 13299 insertions(+)
 create mode 100644 fs/xfs/scrub/agheader.c
 create mode 100644 fs/xfs/scrub/agheader_repair.c
 create mode 100644 fs/xfs/scrub/alloc.c
 create mode 100644 fs/xfs/scrub/attr.c
 create mode 100644 fs/xfs/scrub/attr.h
 create mode 100644 fs/xfs/scrub/bitmap.c
 create mode 100644 fs/xfs/scrub/bitmap.h
 create mode 100644 fs/xfs/scrub/bmap.c
 create mode 100644 fs/xfs/scrub/btree.c
 create mode 100644 fs/xfs/scrub/btree.h
 create mode 100644 fs/xfs/scrub/common.c
 create mode 100644 fs/xfs/scrub/common.h
 create mode 100644 fs/xfs/scrub/dabtree.c
 create mode 100644 fs/xfs/scrub/dabtree.h
 create mode 100644 fs/xfs/scrub/dir.c
 create mode 100644 fs/xfs/scrub/fscounters.c
 create mode 100644 fs/xfs/scrub/health.c
 create mode 100644 fs/xfs/scrub/health.h
 create mode 100644 fs/xfs/scrub/ialloc.c
 create mode 100644 fs/xfs/scrub/inode.c
 create mode 100644 fs/xfs/scrub/parent.c
 create mode 100644 fs/xfs/scrub/quota.c
 create mode 100644 fs/xfs/scrub/refcount.c
 create mode 100644 fs/xfs/scrub/repair.c
 create mode 100644 fs/xfs/scrub/repair.h
 create mode 100644 fs/xfs/scrub/rmap.c
 create mode 100644 fs/xfs/scrub/rtbitmap.c
 create mode 100644 fs/xfs/scrub/scrub.c
 create mode 100644 fs/xfs/scrub/scrub.h
 create mode 100644 fs/xfs/scrub/symlink.c
 create mode 100644 fs/xfs/scrub/trace.c
 create mode 100644 fs/xfs/scrub/trace.h
 create mode 100644 fs/xfs/scrub/xfs_scrub.h

diff --git a/fs/xfs/scrub/agheader.c b/fs/xfs/scrub/agheader.c
new file mode 100644
index 000000000..b7b838bd4
--- /dev/null
+++ b/fs/xfs/scrub/agheader.c
@@ -0,0 +1,930 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2017 Oracle. All Rights Reserved.
+ * Author: Darrick J. 
Wong + */ +#include "xfs.h" +#include "xfs_fs.h" +#include "xfs_shared.h" +#include "xfs_format.h" +#include "xfs_trans_resv.h" +#include "xfs_mount.h" +#include "xfs_btree.h" +#include "xfs_sb.h" +#include "xfs_alloc.h" +#include "xfs_ialloc.h" +#include "xfs_rmap.h" +#include "xfs_ag.h" +#include "scrub/scrub.h" +#include "scrub/common.h" + +/* Superblock */ + +/* Cross-reference with the other btrees. */ +STATIC void +xchk_superblock_xref( + struct xfs_scrub *sc, + struct xfs_buf *bp) +{ + struct xfs_mount *mp = sc->mp; + xfs_agnumber_t agno = sc->sm->sm_agno; + xfs_agblock_t agbno; + int error; + + if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) + return; + + agbno = XFS_SB_BLOCK(mp); + + error = xchk_ag_init_existing(sc, agno, &sc->sa); + if (!xchk_xref_process_error(sc, agno, agbno, &error)) + return; + + xchk_xref_is_used_space(sc, agbno, 1); + xchk_xref_is_not_inode_chunk(sc, agbno, 1); + xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_FS); + xchk_xref_is_not_shared(sc, agbno, 1); + + /* scrub teardown will take care of sc->sa for us */ +} + +/* + * Scrub the filesystem superblock. + * + * Note: We do /not/ attempt to check AG 0's superblock. Mount is + * responsible for validating all the geometry information in sb 0, so + * if the filesystem is capable of initiating online scrub, then clearly + * sb 0 is ok and we can use its information to check everything else. + */ +int +xchk_superblock( + struct xfs_scrub *sc) +{ + struct xfs_mount *mp = sc->mp; + struct xfs_buf *bp; + struct xfs_dsb *sb; + struct xfs_perag *pag; + xfs_agnumber_t agno; + uint32_t v2_ok; + __be32 features_mask; + int error; + __be16 vernum_mask; + + agno = sc->sm->sm_agno; + if (agno == 0) + return 0; + + /* + * Grab an active reference to the perag structure. If we can't get + * it, we're racing with something that's tearing down the AG, so + * signal that the AG no longer exists. + */ + pag = xfs_perag_get(mp, agno); + if (!pag) + return -ENOENT; + + error = xfs_sb_read_secondary(mp, sc->tp, agno, &bp); + /* + * The superblock verifier can return several different error codes + * if it thinks the superblock doesn't look right. For a mount these + * would all get bounced back to userspace, but if we're here then the + * fs mounted successfully, which means that this secondary superblock + * is simply incorrect. Treat all these codes the same way we treat + * any corruption. + */ + switch (error) { + case -EINVAL: /* also -EWRONGFS */ + case -ENOSYS: + case -EFBIG: + error = -EFSCORRUPTED; + fallthrough; + default: + break; + } + if (!xchk_process_error(sc, agno, XFS_SB_BLOCK(mp), &error)) + goto out_pag; + + sb = bp->b_addr; + + /* + * Verify the geometries match. Fields that are permanently + * set by mkfs are checked; fields that can be updated later + * (and are not propagated to backup superblocks) are preen + * checked. 
+	 */
+	if (sb->sb_blocksize != cpu_to_be32(mp->m_sb.sb_blocksize))
+		xchk_block_set_corrupt(sc, bp);
+
+	if (sb->sb_dblocks != cpu_to_be64(mp->m_sb.sb_dblocks))
+		xchk_block_set_corrupt(sc, bp);
+
+	if (sb->sb_rblocks != cpu_to_be64(mp->m_sb.sb_rblocks))
+		xchk_block_set_corrupt(sc, bp);
+
+	if (sb->sb_rextents != cpu_to_be64(mp->m_sb.sb_rextents))
+		xchk_block_set_corrupt(sc, bp);
+
+	if (!uuid_equal(&sb->sb_uuid, &mp->m_sb.sb_uuid))
+		xchk_block_set_preen(sc, bp);
+
+	if (sb->sb_logstart != cpu_to_be64(mp->m_sb.sb_logstart))
+		xchk_block_set_corrupt(sc, bp);
+
+	if (sb->sb_rootino != cpu_to_be64(mp->m_sb.sb_rootino))
+		xchk_block_set_preen(sc, bp);
+
+	if (sb->sb_rbmino != cpu_to_be64(mp->m_sb.sb_rbmino))
+		xchk_block_set_preen(sc, bp);
+
+	if (sb->sb_rsumino != cpu_to_be64(mp->m_sb.sb_rsumino))
+		xchk_block_set_preen(sc, bp);
+
+	if (sb->sb_rextsize != cpu_to_be32(mp->m_sb.sb_rextsize))
+		xchk_block_set_corrupt(sc, bp);
+
+	if (sb->sb_agblocks != cpu_to_be32(mp->m_sb.sb_agblocks))
+		xchk_block_set_corrupt(sc, bp);
+
+	if (sb->sb_agcount != cpu_to_be32(mp->m_sb.sb_agcount))
+		xchk_block_set_corrupt(sc, bp);
+
+	if (sb->sb_rbmblocks != cpu_to_be32(mp->m_sb.sb_rbmblocks))
+		xchk_block_set_corrupt(sc, bp);
+
+	if (sb->sb_logblocks != cpu_to_be32(mp->m_sb.sb_logblocks))
+		xchk_block_set_corrupt(sc, bp);
+
+	/* Check sb_versionnum bits that are set at mkfs time. */
+	vernum_mask = cpu_to_be16(~XFS_SB_VERSION_OKBITS |
+				  XFS_SB_VERSION_NUMBITS |
+				  XFS_SB_VERSION_ALIGNBIT |
+				  XFS_SB_VERSION_DALIGNBIT |
+				  XFS_SB_VERSION_SHAREDBIT |
+				  XFS_SB_VERSION_LOGV2BIT |
+				  XFS_SB_VERSION_SECTORBIT |
+				  XFS_SB_VERSION_EXTFLGBIT |
+				  XFS_SB_VERSION_DIRV2BIT);
+	if ((sb->sb_versionnum & vernum_mask) !=
+	    (cpu_to_be16(mp->m_sb.sb_versionnum) & vernum_mask))
+		xchk_block_set_corrupt(sc, bp);
+
+	/* Check sb_versionnum bits that can be set after mkfs time. */
+	vernum_mask = cpu_to_be16(XFS_SB_VERSION_ATTRBIT |
+				  XFS_SB_VERSION_NLINKBIT |
+				  XFS_SB_VERSION_QUOTABIT);
+	if ((sb->sb_versionnum & vernum_mask) !=
+	    (cpu_to_be16(mp->m_sb.sb_versionnum) & vernum_mask))
+		xchk_block_set_preen(sc, bp);
+
+	if (sb->sb_sectsize != cpu_to_be16(mp->m_sb.sb_sectsize))
+		xchk_block_set_corrupt(sc, bp);
+
+	if (sb->sb_inodesize != cpu_to_be16(mp->m_sb.sb_inodesize))
+		xchk_block_set_corrupt(sc, bp);
+
+	if (sb->sb_inopblock != cpu_to_be16(mp->m_sb.sb_inopblock))
+		xchk_block_set_corrupt(sc, bp);
+
+	if (memcmp(sb->sb_fname, mp->m_sb.sb_fname, sizeof(sb->sb_fname)))
+		xchk_block_set_preen(sc, bp);
+
+	if (sb->sb_blocklog != mp->m_sb.sb_blocklog)
+		xchk_block_set_corrupt(sc, bp);
+
+	if (sb->sb_sectlog != mp->m_sb.sb_sectlog)
+		xchk_block_set_corrupt(sc, bp);
+
+	if (sb->sb_inodelog != mp->m_sb.sb_inodelog)
+		xchk_block_set_corrupt(sc, bp);
+
+	if (sb->sb_inopblog != mp->m_sb.sb_inopblog)
+		xchk_block_set_corrupt(sc, bp);
+
+	if (sb->sb_agblklog != mp->m_sb.sb_agblklog)
+		xchk_block_set_corrupt(sc, bp);
+
+	if (sb->sb_rextslog != mp->m_sb.sb_rextslog)
+		xchk_block_set_corrupt(sc, bp);
+
+	if (sb->sb_imax_pct != mp->m_sb.sb_imax_pct)
+		xchk_block_set_preen(sc, bp);
+
+	/*
+	 * Skip the summary counters since we track them in memory anyway.
+	 * sb_icount, sb_ifree, sb_fdblocks, sb_frextents
+	 */
+
+	if (sb->sb_uquotino != cpu_to_be64(mp->m_sb.sb_uquotino))
+		xchk_block_set_preen(sc, bp);
+
+	if (sb->sb_gquotino != cpu_to_be64(mp->m_sb.sb_gquotino))
+		xchk_block_set_preen(sc, bp);
+
+	/*
+	 * Skip the quota flags since repair will force quotacheck. 
+ * sb_qflags + */ + + if (sb->sb_flags != mp->m_sb.sb_flags) + xchk_block_set_corrupt(sc, bp); + + if (sb->sb_shared_vn != mp->m_sb.sb_shared_vn) + xchk_block_set_corrupt(sc, bp); + + if (sb->sb_inoalignmt != cpu_to_be32(mp->m_sb.sb_inoalignmt)) + xchk_block_set_corrupt(sc, bp); + + if (sb->sb_unit != cpu_to_be32(mp->m_sb.sb_unit)) + xchk_block_set_preen(sc, bp); + + if (sb->sb_width != cpu_to_be32(mp->m_sb.sb_width)) + xchk_block_set_preen(sc, bp); + + if (sb->sb_dirblklog != mp->m_sb.sb_dirblklog) + xchk_block_set_corrupt(sc, bp); + + if (sb->sb_logsectlog != mp->m_sb.sb_logsectlog) + xchk_block_set_corrupt(sc, bp); + + if (sb->sb_logsectsize != cpu_to_be16(mp->m_sb.sb_logsectsize)) + xchk_block_set_corrupt(sc, bp); + + if (sb->sb_logsunit != cpu_to_be32(mp->m_sb.sb_logsunit)) + xchk_block_set_corrupt(sc, bp); + + /* Do we see any invalid bits in sb_features2? */ + if (!xfs_sb_version_hasmorebits(&mp->m_sb)) { + if (sb->sb_features2 != 0) + xchk_block_set_corrupt(sc, bp); + } else { + v2_ok = XFS_SB_VERSION2_OKBITS; + if (xfs_sb_is_v5(&mp->m_sb)) + v2_ok |= XFS_SB_VERSION2_CRCBIT; + + if (!!(sb->sb_features2 & cpu_to_be32(~v2_ok))) + xchk_block_set_corrupt(sc, bp); + + if (sb->sb_features2 != sb->sb_bad_features2) + xchk_block_set_preen(sc, bp); + } + + /* Check sb_features2 flags that are set at mkfs time. */ + features_mask = cpu_to_be32(XFS_SB_VERSION2_LAZYSBCOUNTBIT | + XFS_SB_VERSION2_PROJID32BIT | + XFS_SB_VERSION2_CRCBIT | + XFS_SB_VERSION2_FTYPE); + if ((sb->sb_features2 & features_mask) != + (cpu_to_be32(mp->m_sb.sb_features2) & features_mask)) + xchk_block_set_corrupt(sc, bp); + + /* Check sb_features2 flags that can be set after mkfs time. */ + features_mask = cpu_to_be32(XFS_SB_VERSION2_ATTR2BIT); + if ((sb->sb_features2 & features_mask) != + (cpu_to_be32(mp->m_sb.sb_features2) & features_mask)) + xchk_block_set_preen(sc, bp); + + if (!xfs_has_crc(mp)) { + /* all v5 fields must be zero */ + if (memchr_inv(&sb->sb_features_compat, 0, + sizeof(struct xfs_dsb) - + offsetof(struct xfs_dsb, sb_features_compat))) + xchk_block_set_corrupt(sc, bp); + } else { + /* compat features must match */ + if (sb->sb_features_compat != + cpu_to_be32(mp->m_sb.sb_features_compat)) + xchk_block_set_corrupt(sc, bp); + + /* ro compat features must match */ + if (sb->sb_features_ro_compat != + cpu_to_be32(mp->m_sb.sb_features_ro_compat)) + xchk_block_set_corrupt(sc, bp); + + /* + * NEEDSREPAIR is ignored on a secondary super, so we should + * clear it when we find it, though it's not a corruption. + */ + features_mask = cpu_to_be32(XFS_SB_FEAT_INCOMPAT_NEEDSREPAIR); + if ((cpu_to_be32(mp->m_sb.sb_features_incompat) ^ + sb->sb_features_incompat) & features_mask) + xchk_block_set_preen(sc, bp); + + /* all other incompat features must match */ + if ((cpu_to_be32(mp->m_sb.sb_features_incompat) ^ + sb->sb_features_incompat) & ~features_mask) + xchk_block_set_corrupt(sc, bp); + + /* + * log incompat features protect newer log record types from + * older log recovery code. Log recovery doesn't check the + * secondary supers, so we can clear these if needed. 
+ */ + if (sb->sb_features_log_incompat) + xchk_block_set_preen(sc, bp); + + /* Don't care about sb_crc */ + + if (sb->sb_spino_align != cpu_to_be32(mp->m_sb.sb_spino_align)) + xchk_block_set_corrupt(sc, bp); + + if (sb->sb_pquotino != cpu_to_be64(mp->m_sb.sb_pquotino)) + xchk_block_set_preen(sc, bp); + + /* Don't care about sb_lsn */ + } + + if (xfs_has_metauuid(mp)) { + /* The metadata UUID must be the same for all supers */ + if (!uuid_equal(&sb->sb_meta_uuid, &mp->m_sb.sb_meta_uuid)) + xchk_block_set_corrupt(sc, bp); + } + + /* Everything else must be zero. */ + if (memchr_inv(sb + 1, 0, + BBTOB(bp->b_length) - sizeof(struct xfs_dsb))) + xchk_block_set_corrupt(sc, bp); + + xchk_superblock_xref(sc, bp); +out_pag: + xfs_perag_put(pag); + return error; +} + +/* AGF */ + +/* Tally freespace record lengths. */ +STATIC int +xchk_agf_record_bno_lengths( + struct xfs_btree_cur *cur, + const struct xfs_alloc_rec_incore *rec, + void *priv) +{ + xfs_extlen_t *blocks = priv; + + (*blocks) += rec->ar_blockcount; + return 0; +} + +/* Check agf_freeblks */ +static inline void +xchk_agf_xref_freeblks( + struct xfs_scrub *sc) +{ + struct xfs_agf *agf = sc->sa.agf_bp->b_addr; + xfs_extlen_t blocks = 0; + int error; + + if (!sc->sa.bno_cur) + return; + + error = xfs_alloc_query_all(sc->sa.bno_cur, + xchk_agf_record_bno_lengths, &blocks); + if (!xchk_should_check_xref(sc, &error, &sc->sa.bno_cur)) + return; + if (blocks != be32_to_cpu(agf->agf_freeblks)) + xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp); +} + +/* Cross reference the AGF with the cntbt (freespace by length btree) */ +static inline void +xchk_agf_xref_cntbt( + struct xfs_scrub *sc) +{ + struct xfs_agf *agf = sc->sa.agf_bp->b_addr; + xfs_agblock_t agbno; + xfs_extlen_t blocks; + int have; + int error; + + if (!sc->sa.cnt_cur) + return; + + /* Any freespace at all? */ + error = xfs_alloc_lookup_le(sc->sa.cnt_cur, 0, -1U, &have); + if (!xchk_should_check_xref(sc, &error, &sc->sa.cnt_cur)) + return; + if (!have) { + if (agf->agf_freeblks != cpu_to_be32(0)) + xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp); + return; + } + + /* Check agf_longest */ + error = xfs_alloc_get_rec(sc->sa.cnt_cur, &agbno, &blocks, &have); + if (!xchk_should_check_xref(sc, &error, &sc->sa.cnt_cur)) + return; + if (!have || blocks != be32_to_cpu(agf->agf_longest)) + xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp); +} + +/* Check the btree block counts in the AGF against the btrees. */ +STATIC void +xchk_agf_xref_btreeblks( + struct xfs_scrub *sc) +{ + struct xfs_agf *agf = sc->sa.agf_bp->b_addr; + struct xfs_mount *mp = sc->mp; + xfs_agblock_t blocks; + xfs_agblock_t btreeblks; + int error; + + /* agf_btreeblks didn't exist before lazysbcount */ + if (!xfs_has_lazysbcount(sc->mp)) + return; + + /* Check agf_rmap_blocks; set up for agf_btreeblks check */ + if (sc->sa.rmap_cur) { + error = xfs_btree_count_blocks(sc->sa.rmap_cur, &blocks); + if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur)) + return; + btreeblks = blocks - 1; + if (blocks != be32_to_cpu(agf->agf_rmap_blocks)) + xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp); + } else { + btreeblks = 0; + } + + /* + * No rmap cursor; we can't xref if we have the rmapbt feature. + * We also can't do it if we're missing the free space btree cursors. 
+ */ + if ((xfs_has_rmapbt(mp) && !sc->sa.rmap_cur) || + !sc->sa.bno_cur || !sc->sa.cnt_cur) + return; + + /* Check agf_btreeblks */ + error = xfs_btree_count_blocks(sc->sa.bno_cur, &blocks); + if (!xchk_should_check_xref(sc, &error, &sc->sa.bno_cur)) + return; + btreeblks += blocks - 1; + + error = xfs_btree_count_blocks(sc->sa.cnt_cur, &blocks); + if (!xchk_should_check_xref(sc, &error, &sc->sa.cnt_cur)) + return; + btreeblks += blocks - 1; + + if (btreeblks != be32_to_cpu(agf->agf_btreeblks)) + xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp); +} + +/* Check agf_refcount_blocks against tree size */ +static inline void +xchk_agf_xref_refcblks( + struct xfs_scrub *sc) +{ + struct xfs_agf *agf = sc->sa.agf_bp->b_addr; + xfs_agblock_t blocks; + int error; + + if (!sc->sa.refc_cur) + return; + + error = xfs_btree_count_blocks(sc->sa.refc_cur, &blocks); + if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur)) + return; + if (blocks != be32_to_cpu(agf->agf_refcount_blocks)) + xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp); +} + +/* Cross-reference with the other btrees. */ +STATIC void +xchk_agf_xref( + struct xfs_scrub *sc) +{ + struct xfs_mount *mp = sc->mp; + xfs_agblock_t agbno; + + if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) + return; + + agbno = XFS_AGF_BLOCK(mp); + + xchk_ag_btcur_init(sc, &sc->sa); + + xchk_xref_is_used_space(sc, agbno, 1); + xchk_agf_xref_freeblks(sc); + xchk_agf_xref_cntbt(sc); + xchk_xref_is_not_inode_chunk(sc, agbno, 1); + xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_FS); + xchk_agf_xref_btreeblks(sc); + xchk_xref_is_not_shared(sc, agbno, 1); + xchk_agf_xref_refcblks(sc); + + /* scrub teardown will take care of sc->sa for us */ +} + +/* Scrub the AGF. */ +int +xchk_agf( + struct xfs_scrub *sc) +{ + struct xfs_mount *mp = sc->mp; + struct xfs_agf *agf; + struct xfs_perag *pag; + xfs_agnumber_t agno = sc->sm->sm_agno; + xfs_agblock_t agbno; + xfs_agblock_t eoag; + xfs_agblock_t agfl_first; + xfs_agblock_t agfl_last; + xfs_agblock_t agfl_count; + xfs_agblock_t fl_count; + int level; + int error = 0; + + error = xchk_ag_read_headers(sc, agno, &sc->sa); + if (!xchk_process_error(sc, agno, XFS_AGF_BLOCK(sc->mp), &error)) + goto out; + xchk_buffer_recheck(sc, sc->sa.agf_bp); + + agf = sc->sa.agf_bp->b_addr; + pag = sc->sa.pag; + + /* Check the AG length */ + eoag = be32_to_cpu(agf->agf_length); + if (eoag != pag->block_count) + xchk_block_set_corrupt(sc, sc->sa.agf_bp); + + /* Check the AGF btree roots and levels */ + agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNO]); + if (!xfs_verify_agbno(pag, agbno)) + xchk_block_set_corrupt(sc, sc->sa.agf_bp); + + agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNT]); + if (!xfs_verify_agbno(pag, agbno)) + xchk_block_set_corrupt(sc, sc->sa.agf_bp); + + level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]); + if (level <= 0 || level > mp->m_alloc_maxlevels) + xchk_block_set_corrupt(sc, sc->sa.agf_bp); + + level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]); + if (level <= 0 || level > mp->m_alloc_maxlevels) + xchk_block_set_corrupt(sc, sc->sa.agf_bp); + + if (xfs_has_rmapbt(mp)) { + agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_RMAP]); + if (!xfs_verify_agbno(pag, agbno)) + xchk_block_set_corrupt(sc, sc->sa.agf_bp); + + level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]); + if (level <= 0 || level > mp->m_rmap_maxlevels) + xchk_block_set_corrupt(sc, sc->sa.agf_bp); + } + + if (xfs_has_reflink(mp)) { + agbno = be32_to_cpu(agf->agf_refcount_root); + if (!xfs_verify_agbno(pag, agbno)) + xchk_block_set_corrupt(sc, sc->sa.agf_bp); 
+ + level = be32_to_cpu(agf->agf_refcount_level); + if (level <= 0 || level > mp->m_refc_maxlevels) + xchk_block_set_corrupt(sc, sc->sa.agf_bp); + } + + /* Check the AGFL counters */ + agfl_first = be32_to_cpu(agf->agf_flfirst); + agfl_last = be32_to_cpu(agf->agf_fllast); + agfl_count = be32_to_cpu(agf->agf_flcount); + if (agfl_last > agfl_first) + fl_count = agfl_last - agfl_first + 1; + else + fl_count = xfs_agfl_size(mp) - agfl_first + agfl_last + 1; + if (agfl_count != 0 && fl_count != agfl_count) + xchk_block_set_corrupt(sc, sc->sa.agf_bp); + + /* Do the incore counters match? */ + if (pag->pagf_freeblks != be32_to_cpu(agf->agf_freeblks)) + xchk_block_set_corrupt(sc, sc->sa.agf_bp); + if (pag->pagf_flcount != be32_to_cpu(agf->agf_flcount)) + xchk_block_set_corrupt(sc, sc->sa.agf_bp); + if (xfs_has_lazysbcount(sc->mp) && + pag->pagf_btreeblks != be32_to_cpu(agf->agf_btreeblks)) + xchk_block_set_corrupt(sc, sc->sa.agf_bp); + + xchk_agf_xref(sc); +out: + return error; +} + +/* AGFL */ + +struct xchk_agfl_info { + unsigned int sz_entries; + unsigned int nr_entries; + xfs_agblock_t *entries; + struct xfs_scrub *sc; +}; + +/* Cross-reference with the other btrees. */ +STATIC void +xchk_agfl_block_xref( + struct xfs_scrub *sc, + xfs_agblock_t agbno) +{ + if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) + return; + + xchk_xref_is_used_space(sc, agbno, 1); + xchk_xref_is_not_inode_chunk(sc, agbno, 1); + xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_AG); + xchk_xref_is_not_shared(sc, agbno, 1); +} + +/* Scrub an AGFL block. */ +STATIC int +xchk_agfl_block( + struct xfs_mount *mp, + xfs_agblock_t agbno, + void *priv) +{ + struct xchk_agfl_info *sai = priv; + struct xfs_scrub *sc = sai->sc; + + if (xfs_verify_agbno(sc->sa.pag, agbno) && + sai->nr_entries < sai->sz_entries) + sai->entries[sai->nr_entries++] = agbno; + else + xchk_block_set_corrupt(sc, sc->sa.agfl_bp); + + xchk_agfl_block_xref(sc, agbno); + + if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) + return -ECANCELED; + + return 0; +} + +static int +xchk_agblock_cmp( + const void *pa, + const void *pb) +{ + const xfs_agblock_t *a = pa; + const xfs_agblock_t *b = pb; + + return (int)*a - (int)*b; +} + +/* Cross-reference with the other btrees. */ +STATIC void +xchk_agfl_xref( + struct xfs_scrub *sc) +{ + struct xfs_mount *mp = sc->mp; + xfs_agblock_t agbno; + + if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) + return; + + agbno = XFS_AGFL_BLOCK(mp); + + xchk_ag_btcur_init(sc, &sc->sa); + + xchk_xref_is_used_space(sc, agbno, 1); + xchk_xref_is_not_inode_chunk(sc, agbno, 1); + xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_FS); + xchk_xref_is_not_shared(sc, agbno, 1); + + /* + * Scrub teardown will take care of sc->sa for us. Leave sc->sa + * active so that the agfl block xref can use it too. + */ +} + +/* Scrub the AGFL. */ +int +xchk_agfl( + struct xfs_scrub *sc) +{ + struct xchk_agfl_info sai; + struct xfs_agf *agf; + xfs_agnumber_t agno = sc->sm->sm_agno; + unsigned int agflcount; + unsigned int i; + int error; + + error = xchk_ag_read_headers(sc, agno, &sc->sa); + if (!xchk_process_error(sc, agno, XFS_AGFL_BLOCK(sc->mp), &error)) + goto out; + if (!sc->sa.agf_bp) + return -EFSCORRUPTED; + xchk_buffer_recheck(sc, sc->sa.agfl_bp); + + xchk_agfl_xref(sc); + + if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) + goto out; + + /* Allocate buffer to ensure uniqueness of AGFL entries. 
*/ + agf = sc->sa.agf_bp->b_addr; + agflcount = be32_to_cpu(agf->agf_flcount); + if (agflcount > xfs_agfl_size(sc->mp)) { + xchk_block_set_corrupt(sc, sc->sa.agf_bp); + goto out; + } + memset(&sai, 0, sizeof(sai)); + sai.sc = sc; + sai.sz_entries = agflcount; + sai.entries = kmem_zalloc(sizeof(xfs_agblock_t) * agflcount, + KM_MAYFAIL); + if (!sai.entries) { + error = -ENOMEM; + goto out; + } + + /* Check the blocks in the AGFL. */ + error = xfs_agfl_walk(sc->mp, sc->sa.agf_bp->b_addr, + sc->sa.agfl_bp, xchk_agfl_block, &sai); + if (error == -ECANCELED) { + error = 0; + goto out_free; + } + if (error) + goto out_free; + + if (agflcount != sai.nr_entries) { + xchk_block_set_corrupt(sc, sc->sa.agf_bp); + goto out_free; + } + + /* Sort entries, check for duplicates. */ + sort(sai.entries, sai.nr_entries, sizeof(sai.entries[0]), + xchk_agblock_cmp, NULL); + for (i = 1; i < sai.nr_entries; i++) { + if (sai.entries[i] == sai.entries[i - 1]) { + xchk_block_set_corrupt(sc, sc->sa.agf_bp); + break; + } + } + +out_free: + kmem_free(sai.entries); +out: + return error; +} + +/* AGI */ + +/* Check agi_count/agi_freecount */ +static inline void +xchk_agi_xref_icounts( + struct xfs_scrub *sc) +{ + struct xfs_agi *agi = sc->sa.agi_bp->b_addr; + xfs_agino_t icount; + xfs_agino_t freecount; + int error; + + if (!sc->sa.ino_cur) + return; + + error = xfs_ialloc_count_inodes(sc->sa.ino_cur, &icount, &freecount); + if (!xchk_should_check_xref(sc, &error, &sc->sa.ino_cur)) + return; + if (be32_to_cpu(agi->agi_count) != icount || + be32_to_cpu(agi->agi_freecount) != freecount) + xchk_block_xref_set_corrupt(sc, sc->sa.agi_bp); +} + +/* Check agi_[fi]blocks against tree size */ +static inline void +xchk_agi_xref_fiblocks( + struct xfs_scrub *sc) +{ + struct xfs_agi *agi = sc->sa.agi_bp->b_addr; + xfs_agblock_t blocks; + int error = 0; + + if (!xfs_has_inobtcounts(sc->mp)) + return; + + if (sc->sa.ino_cur) { + error = xfs_btree_count_blocks(sc->sa.ino_cur, &blocks); + if (!xchk_should_check_xref(sc, &error, &sc->sa.ino_cur)) + return; + if (blocks != be32_to_cpu(agi->agi_iblocks)) + xchk_block_xref_set_corrupt(sc, sc->sa.agi_bp); + } + + if (sc->sa.fino_cur) { + error = xfs_btree_count_blocks(sc->sa.fino_cur, &blocks); + if (!xchk_should_check_xref(sc, &error, &sc->sa.fino_cur)) + return; + if (blocks != be32_to_cpu(agi->agi_fblocks)) + xchk_block_xref_set_corrupt(sc, sc->sa.agi_bp); + } +} + +/* Cross-reference with the other btrees. */ +STATIC void +xchk_agi_xref( + struct xfs_scrub *sc) +{ + struct xfs_mount *mp = sc->mp; + xfs_agblock_t agbno; + + if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) + return; + + agbno = XFS_AGI_BLOCK(mp); + + xchk_ag_btcur_init(sc, &sc->sa); + + xchk_xref_is_used_space(sc, agbno, 1); + xchk_xref_is_not_inode_chunk(sc, agbno, 1); + xchk_agi_xref_icounts(sc); + xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_FS); + xchk_xref_is_not_shared(sc, agbno, 1); + xchk_agi_xref_fiblocks(sc); + + /* scrub teardown will take care of sc->sa for us */ +} + +/* Scrub the AGI. 
*/ +int +xchk_agi( + struct xfs_scrub *sc) +{ + struct xfs_mount *mp = sc->mp; + struct xfs_agi *agi; + struct xfs_perag *pag; + struct xfs_ino_geometry *igeo = M_IGEO(sc->mp); + xfs_agnumber_t agno = sc->sm->sm_agno; + xfs_agblock_t agbno; + xfs_agblock_t eoag; + xfs_agino_t agino; + xfs_agino_t first_agino; + xfs_agino_t last_agino; + xfs_agino_t icount; + int i; + int level; + int error = 0; + + error = xchk_ag_read_headers(sc, agno, &sc->sa); + if (!xchk_process_error(sc, agno, XFS_AGI_BLOCK(sc->mp), &error)) + goto out; + xchk_buffer_recheck(sc, sc->sa.agi_bp); + + agi = sc->sa.agi_bp->b_addr; + pag = sc->sa.pag; + + /* Check the AG length */ + eoag = be32_to_cpu(agi->agi_length); + if (eoag != pag->block_count) + xchk_block_set_corrupt(sc, sc->sa.agi_bp); + + /* Check btree roots and levels */ + agbno = be32_to_cpu(agi->agi_root); + if (!xfs_verify_agbno(pag, agbno)) + xchk_block_set_corrupt(sc, sc->sa.agi_bp); + + level = be32_to_cpu(agi->agi_level); + if (level <= 0 || level > igeo->inobt_maxlevels) + xchk_block_set_corrupt(sc, sc->sa.agi_bp); + + if (xfs_has_finobt(mp)) { + agbno = be32_to_cpu(agi->agi_free_root); + if (!xfs_verify_agbno(pag, agbno)) + xchk_block_set_corrupt(sc, sc->sa.agi_bp); + + level = be32_to_cpu(agi->agi_free_level); + if (level <= 0 || level > igeo->inobt_maxlevels) + xchk_block_set_corrupt(sc, sc->sa.agi_bp); + } + + /* Check inode counters */ + xfs_agino_range(mp, agno, &first_agino, &last_agino); + icount = be32_to_cpu(agi->agi_count); + if (icount > last_agino - first_agino + 1 || + icount < be32_to_cpu(agi->agi_freecount)) + xchk_block_set_corrupt(sc, sc->sa.agi_bp); + + /* Check inode pointers */ + agino = be32_to_cpu(agi->agi_newino); + if (!xfs_verify_agino_or_null(pag, agino)) + xchk_block_set_corrupt(sc, sc->sa.agi_bp); + + agino = be32_to_cpu(agi->agi_dirino); + if (!xfs_verify_agino_or_null(pag, agino)) + xchk_block_set_corrupt(sc, sc->sa.agi_bp); + + /* Check unlinked inode buckets */ + for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++) { + agino = be32_to_cpu(agi->agi_unlinked[i]); + if (!xfs_verify_agino_or_null(pag, agino)) + xchk_block_set_corrupt(sc, sc->sa.agi_bp); + } + + if (agi->agi_pad32 != cpu_to_be32(0)) + xchk_block_set_corrupt(sc, sc->sa.agi_bp); + + /* Do the incore counters match? */ + if (pag->pagi_count != be32_to_cpu(agi->agi_count)) + xchk_block_set_corrupt(sc, sc->sa.agi_bp); + if (pag->pagi_freecount != be32_to_cpu(agi->agi_freecount)) + xchk_block_set_corrupt(sc, sc->sa.agi_bp); + + xchk_agi_xref(sc); +out: + return error; +} diff --git a/fs/xfs/scrub/agheader_repair.c b/fs/xfs/scrub/agheader_repair.c new file mode 100644 index 000000000..1b0b4e243 --- /dev/null +++ b/fs/xfs/scrub/agheader_repair.c @@ -0,0 +1,947 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2018 Oracle. All Rights Reserved. + * Author: Darrick J. Wong + */ +#include "xfs.h" +#include "xfs_fs.h" +#include "xfs_shared.h" +#include "xfs_format.h" +#include "xfs_trans_resv.h" +#include "xfs_mount.h" +#include "xfs_btree.h" +#include "xfs_log_format.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_alloc.h" +#include "xfs_alloc_btree.h" +#include "xfs_ialloc.h" +#include "xfs_ialloc_btree.h" +#include "xfs_rmap.h" +#include "xfs_rmap_btree.h" +#include "xfs_refcount_btree.h" +#include "xfs_ag.h" +#include "scrub/scrub.h" +#include "scrub/common.h" +#include "scrub/trace.h" +#include "scrub/repair.h" +#include "scrub/bitmap.h" + +/* Superblock */ + +/* Repair the superblock. 
*/ +int +xrep_superblock( + struct xfs_scrub *sc) +{ + struct xfs_mount *mp = sc->mp; + struct xfs_buf *bp; + xfs_agnumber_t agno; + int error; + + /* Don't try to repair AG 0's sb; let xfs_repair deal with it. */ + agno = sc->sm->sm_agno; + if (agno == 0) + return -EOPNOTSUPP; + + error = xfs_sb_get_secondary(mp, sc->tp, agno, &bp); + if (error) + return error; + + /* Copy AG 0's superblock to this one. */ + xfs_buf_zero(bp, 0, BBTOB(bp->b_length)); + xfs_sb_to_disk(bp->b_addr, &mp->m_sb); + + /* + * Don't write out a secondary super with NEEDSREPAIR or log incompat + * features set, since both are ignored when set on a secondary. + */ + if (xfs_has_crc(mp)) { + struct xfs_dsb *sb = bp->b_addr; + + sb->sb_features_incompat &= + ~cpu_to_be32(XFS_SB_FEAT_INCOMPAT_NEEDSREPAIR); + sb->sb_features_log_incompat = 0; + } + + /* Write this to disk. */ + xfs_trans_buf_set_type(sc->tp, bp, XFS_BLFT_SB_BUF); + xfs_trans_log_buf(sc->tp, bp, 0, BBTOB(bp->b_length) - 1); + return error; +} + +/* AGF */ + +struct xrep_agf_allocbt { + struct xfs_scrub *sc; + xfs_agblock_t freeblks; + xfs_agblock_t longest; +}; + +/* Record free space shape information. */ +STATIC int +xrep_agf_walk_allocbt( + struct xfs_btree_cur *cur, + const struct xfs_alloc_rec_incore *rec, + void *priv) +{ + struct xrep_agf_allocbt *raa = priv; + int error = 0; + + if (xchk_should_terminate(raa->sc, &error)) + return error; + + raa->freeblks += rec->ar_blockcount; + if (rec->ar_blockcount > raa->longest) + raa->longest = rec->ar_blockcount; + return error; +} + +/* Does this AGFL block look sane? */ +STATIC int +xrep_agf_check_agfl_block( + struct xfs_mount *mp, + xfs_agblock_t agbno, + void *priv) +{ + struct xfs_scrub *sc = priv; + + if (!xfs_verify_agbno(sc->sa.pag, agbno)) + return -EFSCORRUPTED; + return 0; +} + +/* + * Offset within the xrep_find_ag_btree array for each btree type. Avoid the + * XFS_BTNUM_ names here to avoid creating a sparse array. + */ +enum { + XREP_AGF_BNOBT = 0, + XREP_AGF_CNTBT, + XREP_AGF_RMAPBT, + XREP_AGF_REFCOUNTBT, + XREP_AGF_END, + XREP_AGF_MAX +}; + +/* Check a btree root candidate. */ +static inline bool +xrep_check_btree_root( + struct xfs_scrub *sc, + struct xrep_find_ag_btree *fab) +{ + return xfs_verify_agbno(sc->sa.pag, fab->root) && + fab->height <= fab->maxlevels; +} + +/* + * Given the btree roots described by *fab, find the roots, check them for + * sanity, and pass the root data back out via *fab. + * + * This is /also/ a chicken and egg problem because we have to use the rmapbt + * (rooted in the AGF) to find the btrees rooted in the AGF. We also have no + * idea if the btrees make any sense. If we hit obvious corruptions in those + * btrees we'll bail out. + */ +STATIC int +xrep_agf_find_btrees( + struct xfs_scrub *sc, + struct xfs_buf *agf_bp, + struct xrep_find_ag_btree *fab, + struct xfs_buf *agfl_bp) +{ + struct xfs_agf *old_agf = agf_bp->b_addr; + int error; + + /* Go find the root data. */ + error = xrep_find_ag_btree_roots(sc, agf_bp, fab, agfl_bp); + if (error) + return error; + + /* We must find the bnobt, cntbt, and rmapbt roots. */ + if (!xrep_check_btree_root(sc, &fab[XREP_AGF_BNOBT]) || + !xrep_check_btree_root(sc, &fab[XREP_AGF_CNTBT]) || + !xrep_check_btree_root(sc, &fab[XREP_AGF_RMAPBT])) + return -EFSCORRUPTED; + + /* + * We relied on the rmapbt to reconstruct the AGF. If we get a + * different root then something's seriously wrong. 
+ */ + if (fab[XREP_AGF_RMAPBT].root != + be32_to_cpu(old_agf->agf_roots[XFS_BTNUM_RMAPi])) + return -EFSCORRUPTED; + + /* We must find the refcountbt root if that feature is enabled. */ + if (xfs_has_reflink(sc->mp) && + !xrep_check_btree_root(sc, &fab[XREP_AGF_REFCOUNTBT])) + return -EFSCORRUPTED; + + return 0; +} + +/* + * Reinitialize the AGF header, making an in-core copy of the old contents so + * that we know which in-core state needs to be reinitialized. + */ +STATIC void +xrep_agf_init_header( + struct xfs_scrub *sc, + struct xfs_buf *agf_bp, + struct xfs_agf *old_agf) +{ + struct xfs_mount *mp = sc->mp; + struct xfs_agf *agf = agf_bp->b_addr; + + memcpy(old_agf, agf, sizeof(*old_agf)); + memset(agf, 0, BBTOB(agf_bp->b_length)); + agf->agf_magicnum = cpu_to_be32(XFS_AGF_MAGIC); + agf->agf_versionnum = cpu_to_be32(XFS_AGF_VERSION); + agf->agf_seqno = cpu_to_be32(sc->sa.pag->pag_agno); + agf->agf_length = cpu_to_be32(sc->sa.pag->block_count); + agf->agf_flfirst = old_agf->agf_flfirst; + agf->agf_fllast = old_agf->agf_fllast; + agf->agf_flcount = old_agf->agf_flcount; + if (xfs_has_crc(mp)) + uuid_copy(&agf->agf_uuid, &mp->m_sb.sb_meta_uuid); + + /* Mark the incore AGF data stale until we're done fixing things. */ + ASSERT(sc->sa.pag->pagf_init); + sc->sa.pag->pagf_init = 0; +} + +/* Set btree root information in an AGF. */ +STATIC void +xrep_agf_set_roots( + struct xfs_scrub *sc, + struct xfs_agf *agf, + struct xrep_find_ag_btree *fab) +{ + agf->agf_roots[XFS_BTNUM_BNOi] = + cpu_to_be32(fab[XREP_AGF_BNOBT].root); + agf->agf_levels[XFS_BTNUM_BNOi] = + cpu_to_be32(fab[XREP_AGF_BNOBT].height); + + agf->agf_roots[XFS_BTNUM_CNTi] = + cpu_to_be32(fab[XREP_AGF_CNTBT].root); + agf->agf_levels[XFS_BTNUM_CNTi] = + cpu_to_be32(fab[XREP_AGF_CNTBT].height); + + agf->agf_roots[XFS_BTNUM_RMAPi] = + cpu_to_be32(fab[XREP_AGF_RMAPBT].root); + agf->agf_levels[XFS_BTNUM_RMAPi] = + cpu_to_be32(fab[XREP_AGF_RMAPBT].height); + + if (xfs_has_reflink(sc->mp)) { + agf->agf_refcount_root = + cpu_to_be32(fab[XREP_AGF_REFCOUNTBT].root); + agf->agf_refcount_level = + cpu_to_be32(fab[XREP_AGF_REFCOUNTBT].height); + } +} + +/* Update all AGF fields which derive from btree contents. */ +STATIC int +xrep_agf_calc_from_btrees( + struct xfs_scrub *sc, + struct xfs_buf *agf_bp) +{ + struct xrep_agf_allocbt raa = { .sc = sc }; + struct xfs_btree_cur *cur = NULL; + struct xfs_agf *agf = agf_bp->b_addr; + struct xfs_mount *mp = sc->mp; + xfs_agblock_t btreeblks; + xfs_agblock_t blocks; + int error; + + /* Update the AGF counters from the bnobt. */ + cur = xfs_allocbt_init_cursor(mp, sc->tp, agf_bp, + sc->sa.pag, XFS_BTNUM_BNO); + error = xfs_alloc_query_all(cur, xrep_agf_walk_allocbt, &raa); + if (error) + goto err; + error = xfs_btree_count_blocks(cur, &blocks); + if (error) + goto err; + xfs_btree_del_cursor(cur, error); + btreeblks = blocks - 1; + agf->agf_freeblks = cpu_to_be32(raa.freeblks); + agf->agf_longest = cpu_to_be32(raa.longest); + + /* Update the AGF counters from the cntbt. */ + cur = xfs_allocbt_init_cursor(mp, sc->tp, agf_bp, + sc->sa.pag, XFS_BTNUM_CNT); + error = xfs_btree_count_blocks(cur, &blocks); + if (error) + goto err; + xfs_btree_del_cursor(cur, error); + btreeblks += blocks - 1; + + /* Update the AGF counters from the rmapbt. 
*/ + cur = xfs_rmapbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.pag); + error = xfs_btree_count_blocks(cur, &blocks); + if (error) + goto err; + xfs_btree_del_cursor(cur, error); + agf->agf_rmap_blocks = cpu_to_be32(blocks); + btreeblks += blocks - 1; + + agf->agf_btreeblks = cpu_to_be32(btreeblks); + + /* Update the AGF counters from the refcountbt. */ + if (xfs_has_reflink(mp)) { + cur = xfs_refcountbt_init_cursor(mp, sc->tp, agf_bp, + sc->sa.pag); + error = xfs_btree_count_blocks(cur, &blocks); + if (error) + goto err; + xfs_btree_del_cursor(cur, error); + agf->agf_refcount_blocks = cpu_to_be32(blocks); + } + + return 0; +err: + xfs_btree_del_cursor(cur, error); + return error; +} + +/* Commit the new AGF and reinitialize the incore state. */ +STATIC int +xrep_agf_commit_new( + struct xfs_scrub *sc, + struct xfs_buf *agf_bp) +{ + struct xfs_perag *pag; + struct xfs_agf *agf = agf_bp->b_addr; + + /* Trigger fdblocks recalculation */ + xfs_force_summary_recalc(sc->mp); + + /* Write this to disk. */ + xfs_trans_buf_set_type(sc->tp, agf_bp, XFS_BLFT_AGF_BUF); + xfs_trans_log_buf(sc->tp, agf_bp, 0, BBTOB(agf_bp->b_length) - 1); + + /* Now reinitialize the in-core counters we changed. */ + pag = sc->sa.pag; + pag->pagf_btreeblks = be32_to_cpu(agf->agf_btreeblks); + pag->pagf_freeblks = be32_to_cpu(agf->agf_freeblks); + pag->pagf_longest = be32_to_cpu(agf->agf_longest); + pag->pagf_levels[XFS_BTNUM_BNOi] = + be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]); + pag->pagf_levels[XFS_BTNUM_CNTi] = + be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]); + pag->pagf_levels[XFS_BTNUM_RMAPi] = + be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAPi]); + pag->pagf_refcount_level = be32_to_cpu(agf->agf_refcount_level); + pag->pagf_init = 1; + + return 0; +} + +/* Repair the AGF. v5 filesystems only. */ +int +xrep_agf( + struct xfs_scrub *sc) +{ + struct xrep_find_ag_btree fab[XREP_AGF_MAX] = { + [XREP_AGF_BNOBT] = { + .rmap_owner = XFS_RMAP_OWN_AG, + .buf_ops = &xfs_bnobt_buf_ops, + .maxlevels = sc->mp->m_alloc_maxlevels, + }, + [XREP_AGF_CNTBT] = { + .rmap_owner = XFS_RMAP_OWN_AG, + .buf_ops = &xfs_cntbt_buf_ops, + .maxlevels = sc->mp->m_alloc_maxlevels, + }, + [XREP_AGF_RMAPBT] = { + .rmap_owner = XFS_RMAP_OWN_AG, + .buf_ops = &xfs_rmapbt_buf_ops, + .maxlevels = sc->mp->m_rmap_maxlevels, + }, + [XREP_AGF_REFCOUNTBT] = { + .rmap_owner = XFS_RMAP_OWN_REFC, + .buf_ops = &xfs_refcountbt_buf_ops, + .maxlevels = sc->mp->m_refc_maxlevels, + }, + [XREP_AGF_END] = { + .buf_ops = NULL, + }, + }; + struct xfs_agf old_agf; + struct xfs_mount *mp = sc->mp; + struct xfs_buf *agf_bp; + struct xfs_buf *agfl_bp; + struct xfs_agf *agf; + int error; + + /* We require the rmapbt to rebuild anything. */ + if (!xfs_has_rmapbt(mp)) + return -EOPNOTSUPP; + + /* + * Make sure we have the AGF buffer, as scrub might have decided it + * was corrupt after xfs_alloc_read_agf failed with -EFSCORRUPTED. + */ + error = xfs_trans_read_buf(mp, sc->tp, mp->m_ddev_targp, + XFS_AG_DADDR(mp, sc->sa.pag->pag_agno, + XFS_AGF_DADDR(mp)), + XFS_FSS_TO_BB(mp, 1), 0, &agf_bp, NULL); + if (error) + return error; + agf_bp->b_ops = &xfs_agf_buf_ops; + agf = agf_bp->b_addr; + + /* + * Load the AGFL so that we can screen out OWN_AG blocks that are on + * the AGFL now; these blocks might have once been part of the + * bno/cnt/rmap btrees but are not now. This is a chicken and egg + * problem: the AGF is corrupt, so we have to trust the AGFL contents + * because we can't do any serious cross-referencing with any of the + * btrees rooted in the AGF. 
If the AGFL contents are obviously bad + * then we'll bail out. + */ + error = xfs_alloc_read_agfl(sc->sa.pag, sc->tp, &agfl_bp); + if (error) + return error; + + /* + * Spot-check the AGFL blocks; if they're obviously corrupt then + * there's nothing we can do but bail out. + */ + error = xfs_agfl_walk(sc->mp, agf_bp->b_addr, agfl_bp, + xrep_agf_check_agfl_block, sc); + if (error) + return error; + + /* + * Find the AGF btree roots. This is also a chicken-and-egg situation; + * see the function for more details. + */ + error = xrep_agf_find_btrees(sc, agf_bp, fab, agfl_bp); + if (error) + return error; + + /* Start rewriting the header and implant the btrees we found. */ + xrep_agf_init_header(sc, agf_bp, &old_agf); + xrep_agf_set_roots(sc, agf, fab); + error = xrep_agf_calc_from_btrees(sc, agf_bp); + if (error) + goto out_revert; + + /* Commit the changes and reinitialize incore state. */ + return xrep_agf_commit_new(sc, agf_bp); + +out_revert: + /* Mark the incore AGF state stale and revert the AGF. */ + sc->sa.pag->pagf_init = 0; + memcpy(agf, &old_agf, sizeof(old_agf)); + return error; +} + +/* AGFL */ + +struct xrep_agfl { + /* Bitmap of other OWN_AG metadata blocks. */ + struct xbitmap agmetablocks; + + /* Bitmap of free space. */ + struct xbitmap *freesp; + + struct xfs_scrub *sc; +}; + +/* Record all OWN_AG (free space btree) information from the rmap data. */ +STATIC int +xrep_agfl_walk_rmap( + struct xfs_btree_cur *cur, + const struct xfs_rmap_irec *rec, + void *priv) +{ + struct xrep_agfl *ra = priv; + xfs_fsblock_t fsb; + int error = 0; + + if (xchk_should_terminate(ra->sc, &error)) + return error; + + /* Record all the OWN_AG blocks. */ + if (rec->rm_owner == XFS_RMAP_OWN_AG) { + fsb = XFS_AGB_TO_FSB(cur->bc_mp, cur->bc_ag.pag->pag_agno, + rec->rm_startblock); + error = xbitmap_set(ra->freesp, fsb, rec->rm_blockcount); + if (error) + return error; + } + + return xbitmap_set_btcur_path(&ra->agmetablocks, cur); +} + +/* + * Map out all the non-AGFL OWN_AG space in this AG so that we can deduce + * which blocks belong to the AGFL. + * + * Compute the set of old AGFL blocks by subtracting from the list of OWN_AG + * blocks the list of blocks owned by all other OWN_AG metadata (bnobt, cntbt, + * rmapbt). These are the old AGFL blocks, so return that list and the number + * of blocks we're actually going to put back on the AGFL. + */ +STATIC int +xrep_agfl_collect_blocks( + struct xfs_scrub *sc, + struct xfs_buf *agf_bp, + struct xbitmap *agfl_extents, + xfs_agblock_t *flcount) +{ + struct xrep_agfl ra; + struct xfs_mount *mp = sc->mp; + struct xfs_btree_cur *cur; + int error; + + ra.sc = sc; + ra.freesp = agfl_extents; + xbitmap_init(&ra.agmetablocks); + + /* Find all space used by the free space btrees & rmapbt. */ + cur = xfs_rmapbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.pag); + error = xfs_rmap_query_all(cur, xrep_agfl_walk_rmap, &ra); + if (error) + goto err; + xfs_btree_del_cursor(cur, error); + + /* Find all blocks currently being used by the bnobt. */ + cur = xfs_allocbt_init_cursor(mp, sc->tp, agf_bp, + sc->sa.pag, XFS_BTNUM_BNO); + error = xbitmap_set_btblocks(&ra.agmetablocks, cur); + if (error) + goto err; + xfs_btree_del_cursor(cur, error); + + /* Find all blocks currently being used by the cntbt. */ + cur = xfs_allocbt_init_cursor(mp, sc->tp, agf_bp, + sc->sa.pag, XFS_BTNUM_CNT); + error = xbitmap_set_btblocks(&ra.agmetablocks, cur); + if (error) + goto err; + + xfs_btree_del_cursor(cur, error); + + /* + * Drop the freesp meta blocks that are in use by btrees. 
+ * The remaining blocks /should/ be AGFL blocks. + */ + error = xbitmap_disunion(agfl_extents, &ra.agmetablocks); + xbitmap_destroy(&ra.agmetablocks); + if (error) + return error; + + /* + * Calculate the new AGFL size. If we found more blocks than fit in + * the AGFL we'll free them later. + */ + *flcount = min_t(uint64_t, xbitmap_hweight(agfl_extents), + xfs_agfl_size(mp)); + return 0; + +err: + xbitmap_destroy(&ra.agmetablocks); + xfs_btree_del_cursor(cur, error); + return error; +} + +/* Update the AGF and reset the in-core state. */ +STATIC void +xrep_agfl_update_agf( + struct xfs_scrub *sc, + struct xfs_buf *agf_bp, + xfs_agblock_t flcount) +{ + struct xfs_agf *agf = agf_bp->b_addr; + + ASSERT(flcount <= xfs_agfl_size(sc->mp)); + + /* Trigger fdblocks recalculation */ + xfs_force_summary_recalc(sc->mp); + + /* Update the AGF counters. */ + if (sc->sa.pag->pagf_init) + sc->sa.pag->pagf_flcount = flcount; + agf->agf_flfirst = cpu_to_be32(0); + agf->agf_flcount = cpu_to_be32(flcount); + agf->agf_fllast = cpu_to_be32(flcount - 1); + + xfs_alloc_log_agf(sc->tp, agf_bp, + XFS_AGF_FLFIRST | XFS_AGF_FLLAST | XFS_AGF_FLCOUNT); +} + +/* Write out a totally new AGFL. */ +STATIC void +xrep_agfl_init_header( + struct xfs_scrub *sc, + struct xfs_buf *agfl_bp, + struct xbitmap *agfl_extents, + xfs_agblock_t flcount) +{ + struct xfs_mount *mp = sc->mp; + __be32 *agfl_bno; + struct xbitmap_range *br; + struct xbitmap_range *n; + struct xfs_agfl *agfl; + xfs_agblock_t agbno; + unsigned int fl_off; + + ASSERT(flcount <= xfs_agfl_size(mp)); + + /* + * Start rewriting the header by setting the bno[] array to + * NULLAGBLOCK, then setting AGFL header fields. + */ + agfl = XFS_BUF_TO_AGFL(agfl_bp); + memset(agfl, 0xFF, BBTOB(agfl_bp->b_length)); + agfl->agfl_magicnum = cpu_to_be32(XFS_AGFL_MAGIC); + agfl->agfl_seqno = cpu_to_be32(sc->sa.pag->pag_agno); + uuid_copy(&agfl->agfl_uuid, &mp->m_sb.sb_meta_uuid); + + /* + * Fill the AGFL with the remaining blocks. If agfl_extents has more + * blocks than fit in the AGFL, they will be freed in a subsequent + * step. + */ + fl_off = 0; + agfl_bno = xfs_buf_to_agfl_bno(agfl_bp); + for_each_xbitmap_extent(br, n, agfl_extents) { + agbno = XFS_FSB_TO_AGBNO(mp, br->start); + + trace_xrep_agfl_insert(mp, sc->sa.pag->pag_agno, agbno, + br->len); + + while (br->len > 0 && fl_off < flcount) { + agfl_bno[fl_off] = cpu_to_be32(agbno); + fl_off++; + agbno++; + + /* + * We've now used br->start by putting it in the AGFL, + * so bump br so that we don't reap the block later. + */ + br->start++; + br->len--; + } + + if (br->len) + break; + list_del(&br->list); + kmem_free(br); + } + + /* Write new AGFL to disk. */ + xfs_trans_buf_set_type(sc->tp, agfl_bp, XFS_BLFT_AGFL_BUF); + xfs_trans_log_buf(sc->tp, agfl_bp, 0, BBTOB(agfl_bp->b_length) - 1); +} + +/* Repair the AGFL. */ +int +xrep_agfl( + struct xfs_scrub *sc) +{ + struct xbitmap agfl_extents; + struct xfs_mount *mp = sc->mp; + struct xfs_buf *agf_bp; + struct xfs_buf *agfl_bp; + xfs_agblock_t flcount; + int error; + + /* We require the rmapbt to rebuild anything. */ + if (!xfs_has_rmapbt(mp)) + return -EOPNOTSUPP; + + xbitmap_init(&agfl_extents); + + /* + * Read the AGF so that we can query the rmapbt. We hope that there's + * nothing wrong with the AGF, but all the AG header repair functions + * have this chicken-and-egg problem. 
+ */ + error = xfs_alloc_read_agf(sc->sa.pag, sc->tp, 0, &agf_bp); + if (error) + return error; + + /* + * Make sure we have the AGFL buffer, as scrub might have decided it + * was corrupt after xfs_alloc_read_agfl failed with -EFSCORRUPTED. + */ + error = xfs_trans_read_buf(mp, sc->tp, mp->m_ddev_targp, + XFS_AG_DADDR(mp, sc->sa.pag->pag_agno, + XFS_AGFL_DADDR(mp)), + XFS_FSS_TO_BB(mp, 1), 0, &agfl_bp, NULL); + if (error) + return error; + agfl_bp->b_ops = &xfs_agfl_buf_ops; + + /* Gather all the extents we're going to put on the new AGFL. */ + error = xrep_agfl_collect_blocks(sc, agf_bp, &agfl_extents, &flcount); + if (error) + goto err; + + /* + * Update AGF and AGFL. We reset the global free block counter when + * we adjust the AGF flcount (which can fail) so avoid updating any + * buffers until we know that part works. + */ + xrep_agfl_update_agf(sc, agf_bp, flcount); + xrep_agfl_init_header(sc, agfl_bp, &agfl_extents, flcount); + + /* + * Ok, the AGFL should be ready to go now. Roll the transaction to + * make the new AGFL permanent before we start using it to return + * freespace overflow to the freespace btrees. + */ + sc->sa.agf_bp = agf_bp; + sc->sa.agfl_bp = agfl_bp; + error = xrep_roll_ag_trans(sc); + if (error) + goto err; + + /* Dump any AGFL overflow. */ + error = xrep_reap_extents(sc, &agfl_extents, &XFS_RMAP_OINFO_AG, + XFS_AG_RESV_AGFL); +err: + xbitmap_destroy(&agfl_extents); + return error; +} + +/* AGI */ + +/* + * Offset within the xrep_find_ag_btree array for each btree type. Avoid the + * XFS_BTNUM_ names here to avoid creating a sparse array. + */ +enum { + XREP_AGI_INOBT = 0, + XREP_AGI_FINOBT, + XREP_AGI_END, + XREP_AGI_MAX +}; + +/* + * Given the inode btree roots described by *fab, find the roots, check them + * for sanity, and pass the root data back out via *fab. + */ +STATIC int +xrep_agi_find_btrees( + struct xfs_scrub *sc, + struct xrep_find_ag_btree *fab) +{ + struct xfs_buf *agf_bp; + struct xfs_mount *mp = sc->mp; + int error; + + /* Read the AGF. */ + error = xfs_alloc_read_agf(sc->sa.pag, sc->tp, 0, &agf_bp); + if (error) + return error; + + /* Find the btree roots. */ + error = xrep_find_ag_btree_roots(sc, agf_bp, fab, NULL); + if (error) + return error; + + /* We must find the inobt root. */ + if (!xrep_check_btree_root(sc, &fab[XREP_AGI_INOBT])) + return -EFSCORRUPTED; + + /* We must find the finobt root if that feature is enabled. */ + if (xfs_has_finobt(mp) && + !xrep_check_btree_root(sc, &fab[XREP_AGI_FINOBT])) + return -EFSCORRUPTED; + + return 0; +} + +/* + * Reinitialize the AGI header, making an in-core copy of the old contents so + * that we know which in-core state needs to be reinitialized. + */ +STATIC void +xrep_agi_init_header( + struct xfs_scrub *sc, + struct xfs_buf *agi_bp, + struct xfs_agi *old_agi) +{ + struct xfs_agi *agi = agi_bp->b_addr; + struct xfs_mount *mp = sc->mp; + + memcpy(old_agi, agi, sizeof(*old_agi)); + memset(agi, 0, BBTOB(agi_bp->b_length)); + agi->agi_magicnum = cpu_to_be32(XFS_AGI_MAGIC); + agi->agi_versionnum = cpu_to_be32(XFS_AGI_VERSION); + agi->agi_seqno = cpu_to_be32(sc->sa.pag->pag_agno); + agi->agi_length = cpu_to_be32(sc->sa.pag->block_count); + agi->agi_newino = cpu_to_be32(NULLAGINO); + agi->agi_dirino = cpu_to_be32(NULLAGINO); + if (xfs_has_crc(mp)) + uuid_copy(&agi->agi_uuid, &mp->m_sb.sb_meta_uuid); + + /* We don't know how to fix the unlinked list yet. 
*/
+	memcpy(&agi->agi_unlinked, &old_agi->agi_unlinked,
+			sizeof(agi->agi_unlinked));
+
+	/* Mark the incore AGI data stale until we're done fixing things. */
+	ASSERT(sc->sa.pag->pagi_init);
+	sc->sa.pag->pagi_init = 0;
+}
+
+/* Set btree root information in an AGI. */
+STATIC void
+xrep_agi_set_roots(
+	struct xfs_scrub		*sc,
+	struct xfs_agi			*agi,
+	struct xrep_find_ag_btree	*fab)
+{
+	agi->agi_root = cpu_to_be32(fab[XREP_AGI_INOBT].root);
+	agi->agi_level = cpu_to_be32(fab[XREP_AGI_INOBT].height);
+
+	if (xfs_has_finobt(sc->mp)) {
+		agi->agi_free_root = cpu_to_be32(fab[XREP_AGI_FINOBT].root);
+		agi->agi_free_level = cpu_to_be32(fab[XREP_AGI_FINOBT].height);
+	}
+}
+
+/* Update the AGI counters. */
+STATIC int
+xrep_agi_calc_from_btrees(
+	struct xfs_scrub	*sc,
+	struct xfs_buf		*agi_bp)
+{
+	struct xfs_btree_cur	*cur;
+	struct xfs_agi		*agi = agi_bp->b_addr;
+	struct xfs_mount	*mp = sc->mp;
+	xfs_agino_t		count;
+	xfs_agino_t		freecount;
+	int			error;
+
+	cur = xfs_inobt_init_cursor(mp, sc->tp, agi_bp,
+			sc->sa.pag, XFS_BTNUM_INO);
+	error = xfs_ialloc_count_inodes(cur, &count, &freecount);
+	if (error)
+		goto err;
+	if (xfs_has_inobtcounts(mp)) {
+		xfs_agblock_t	blocks;
+
+		error = xfs_btree_count_blocks(cur, &blocks);
+		if (error)
+			goto err;
+		agi->agi_iblocks = cpu_to_be32(blocks);
+	}
+	xfs_btree_del_cursor(cur, error);
+
+	agi->agi_count = cpu_to_be32(count);
+	agi->agi_freecount = cpu_to_be32(freecount);
+
+	if (xfs_has_finobt(mp) && xfs_has_inobtcounts(mp)) {
+		xfs_agblock_t	blocks;
+
+		cur = xfs_inobt_init_cursor(mp, sc->tp, agi_bp,
+				sc->sa.pag, XFS_BTNUM_FINO);
+		error = xfs_btree_count_blocks(cur, &blocks);
+		if (error)
+			goto err;
+		xfs_btree_del_cursor(cur, error);
+		agi->agi_fblocks = cpu_to_be32(blocks);
+	}
+
+	return 0;
+err:
+	xfs_btree_del_cursor(cur, error);
+	return error;
+}
+
+/* Trigger reinitialization of the in-core data. */
+STATIC int
+xrep_agi_commit_new(
+	struct xfs_scrub	*sc,
+	struct xfs_buf		*agi_bp)
+{
+	struct xfs_perag	*pag;
+	struct xfs_agi		*agi = agi_bp->b_addr;
+
+	/* Trigger inode count recalculation */
+	xfs_force_summary_recalc(sc->mp);
+
+	/* Write this to disk. */
+	xfs_trans_buf_set_type(sc->tp, agi_bp, XFS_BLFT_AGI_BUF);
+	xfs_trans_log_buf(sc->tp, agi_bp, 0, BBTOB(agi_bp->b_length) - 1);
+
+	/* Now reinitialize the in-core counters if necessary. */
+	pag = sc->sa.pag;
+	pag->pagi_count = be32_to_cpu(agi->agi_count);
+	pag->pagi_freecount = be32_to_cpu(agi->agi_freecount);
+	pag->pagi_init = 1;
+
+	return 0;
+}
+
+/* Repair the AGI. */
+int
+xrep_agi(
+	struct xfs_scrub		*sc)
+{
+	struct xrep_find_ag_btree	fab[XREP_AGI_MAX] = {
+		[XREP_AGI_INOBT] = {
+			.rmap_owner = XFS_RMAP_OWN_INOBT,
+			.buf_ops = &xfs_inobt_buf_ops,
+			.maxlevels = M_IGEO(sc->mp)->inobt_maxlevels,
+		},
+		[XREP_AGI_FINOBT] = {
+			.rmap_owner = XFS_RMAP_OWN_INOBT,
+			.buf_ops = &xfs_finobt_buf_ops,
+			.maxlevels = M_IGEO(sc->mp)->inobt_maxlevels,
+		},
+		[XREP_AGI_END] = {
+			.buf_ops = NULL
+		},
+	};
+	struct xfs_agi			old_agi;
+	struct xfs_mount		*mp = sc->mp;
+	struct xfs_buf			*agi_bp;
+	struct xfs_agi			*agi;
+	int				error;
+
+	/* We require the rmapbt to rebuild anything. */
+	if (!xfs_has_rmapbt(mp))
+		return -EOPNOTSUPP;
+
+	/*
+	 * Make sure we have the AGI buffer, as scrub might have decided it
+	 * was corrupt after xfs_ialloc_read_agi failed with -EFSCORRUPTED. 
+ */ + error = xfs_trans_read_buf(mp, sc->tp, mp->m_ddev_targp, + XFS_AG_DADDR(mp, sc->sa.pag->pag_agno, + XFS_AGI_DADDR(mp)), + XFS_FSS_TO_BB(mp, 1), 0, &agi_bp, NULL); + if (error) + return error; + agi_bp->b_ops = &xfs_agi_buf_ops; + agi = agi_bp->b_addr; + + /* Find the AGI btree roots. */ + error = xrep_agi_find_btrees(sc, fab); + if (error) + return error; + + /* Start rewriting the header and implant the btrees we found. */ + xrep_agi_init_header(sc, agi_bp, &old_agi); + xrep_agi_set_roots(sc, agi, fab); + error = xrep_agi_calc_from_btrees(sc, agi_bp); + if (error) + goto out_revert; + + /* Reinitialize in-core state. */ + return xrep_agi_commit_new(sc, agi_bp); + +out_revert: + /* Mark the incore AGI state stale and revert the AGI. */ + sc->sa.pag->pagi_init = 0; + memcpy(agi, &old_agi, sizeof(old_agi)); + return error; +} diff --git a/fs/xfs/scrub/alloc.c b/fs/xfs/scrub/alloc.c new file mode 100644 index 000000000..3b38f4e2a --- /dev/null +++ b/fs/xfs/scrub/alloc.c @@ -0,0 +1,155 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2017 Oracle. All Rights Reserved. + * Author: Darrick J. Wong + */ +#include "xfs.h" +#include "xfs_fs.h" +#include "xfs_shared.h" +#include "xfs_format.h" +#include "xfs_trans_resv.h" +#include "xfs_mount.h" +#include "xfs_btree.h" +#include "xfs_alloc.h" +#include "xfs_rmap.h" +#include "scrub/scrub.h" +#include "scrub/common.h" +#include "scrub/btree.h" +#include "xfs_ag.h" + +/* + * Set us up to scrub free space btrees. + */ +int +xchk_setup_ag_allocbt( + struct xfs_scrub *sc) +{ + return xchk_setup_ag_btree(sc, false); +} + +/* Free space btree scrubber. */ +/* + * Ensure there's a corresponding cntbt/bnobt record matching this + * bnobt/cntbt record, respectively. + */ +STATIC void +xchk_allocbt_xref_other( + struct xfs_scrub *sc, + xfs_agblock_t agbno, + xfs_extlen_t len) +{ + struct xfs_btree_cur **pcur; + xfs_agblock_t fbno; + xfs_extlen_t flen; + int has_otherrec; + int error; + + if (sc->sm->sm_type == XFS_SCRUB_TYPE_BNOBT) + pcur = &sc->sa.cnt_cur; + else + pcur = &sc->sa.bno_cur; + if (!*pcur || xchk_skip_xref(sc->sm)) + return; + + error = xfs_alloc_lookup_le(*pcur, agbno, len, &has_otherrec); + if (!xchk_should_check_xref(sc, &error, pcur)) + return; + if (!has_otherrec) { + xchk_btree_xref_set_corrupt(sc, *pcur, 0); + return; + } + + error = xfs_alloc_get_rec(*pcur, &fbno, &flen, &has_otherrec); + if (!xchk_should_check_xref(sc, &error, pcur)) + return; + if (!has_otherrec) { + xchk_btree_xref_set_corrupt(sc, *pcur, 0); + return; + } + + if (fbno != agbno || flen != len) + xchk_btree_xref_set_corrupt(sc, *pcur, 0); +} + +/* Cross-reference with the other btrees. */ +STATIC void +xchk_allocbt_xref( + struct xfs_scrub *sc, + xfs_agblock_t agbno, + xfs_extlen_t len) +{ + if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) + return; + + xchk_allocbt_xref_other(sc, agbno, len); + xchk_xref_is_not_inode_chunk(sc, agbno, len); + xchk_xref_has_no_owner(sc, agbno, len); + xchk_xref_is_not_shared(sc, agbno, len); +} + +/* Scrub a bnobt/cntbt record. */ +STATIC int +xchk_allocbt_rec( + struct xchk_btree *bs, + const union xfs_btree_rec *rec) +{ + struct xfs_perag *pag = bs->cur->bc_ag.pag; + xfs_agblock_t bno; + xfs_extlen_t len; + + bno = be32_to_cpu(rec->alloc.ar_startblock); + len = be32_to_cpu(rec->alloc.ar_blockcount); + + if (!xfs_verify_agbext(pag, bno, len)) + xchk_btree_set_corrupt(bs->sc, bs->cur, 0); + + xchk_allocbt_xref(bs->sc, bno, len); + + return 0; +} + +/* Scrub the freespace btrees for some AG. 
*/ +STATIC int +xchk_allocbt( + struct xfs_scrub *sc, + xfs_btnum_t which) +{ + struct xfs_btree_cur *cur; + + cur = which == XFS_BTNUM_BNO ? sc->sa.bno_cur : sc->sa.cnt_cur; + return xchk_btree(sc, cur, xchk_allocbt_rec, &XFS_RMAP_OINFO_AG, NULL); +} + +int +xchk_bnobt( + struct xfs_scrub *sc) +{ + return xchk_allocbt(sc, XFS_BTNUM_BNO); +} + +int +xchk_cntbt( + struct xfs_scrub *sc) +{ + return xchk_allocbt(sc, XFS_BTNUM_CNT); +} + +/* xref check that the extent is not free */ +void +xchk_xref_is_used_space( + struct xfs_scrub *sc, + xfs_agblock_t agbno, + xfs_extlen_t len) +{ + bool is_freesp; + int error; + + if (!sc->sa.bno_cur || xchk_skip_xref(sc->sm)) + return; + + error = xfs_alloc_has_record(sc->sa.bno_cur, agbno, len, &is_freesp); + if (!xchk_should_check_xref(sc, &error, &sc->sa.bno_cur)) + return; + if (is_freesp) + xchk_btree_xref_set_corrupt(sc, sc->sa.bno_cur, 0); +} diff --git a/fs/xfs/scrub/attr.c b/fs/xfs/scrub/attr.c new file mode 100644 index 000000000..b6f0c9f3f --- /dev/null +++ b/fs/xfs/scrub/attr.c @@ -0,0 +1,527 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2017 Oracle. All Rights Reserved. + * Author: Darrick J. Wong + */ +#include "xfs.h" +#include "xfs_fs.h" +#include "xfs_shared.h" +#include "xfs_format.h" +#include "xfs_trans_resv.h" +#include "xfs_mount.h" +#include "xfs_log_format.h" +#include "xfs_inode.h" +#include "xfs_da_format.h" +#include "xfs_da_btree.h" +#include "xfs_attr.h" +#include "xfs_attr_leaf.h" +#include "scrub/scrub.h" +#include "scrub/common.h" +#include "scrub/dabtree.h" +#include "scrub/attr.h" + +/* + * Allocate enough memory to hold an attr value and attr block bitmaps, + * reallocating the buffer if necessary. Buffer contents are not preserved + * across a reallocation. + */ +static int +xchk_setup_xattr_buf( + struct xfs_scrub *sc, + size_t value_size, + gfp_t flags) +{ + size_t sz; + struct xchk_xattr_buf *ab = sc->buf; + + /* + * We need enough space to read an xattr value from the file or enough + * space to hold three copies of the xattr free space bitmap. We don't + * need the buffer space for both purposes at the same time. + */ + sz = 3 * sizeof(long) * BITS_TO_LONGS(sc->mp->m_attr_geo->blksize); + sz = max_t(size_t, sz, value_size); + + /* + * If there's already a buffer, figure out if we need to reallocate it + * to accommodate a larger size. + */ + if (ab) { + if (sz <= ab->sz) + return 0; + kmem_free(ab); + sc->buf = NULL; + } + + /* + * Don't zero the buffer upon allocation to avoid runtime overhead. + * All users must be careful never to read uninitialized contents. + */ + ab = kvmalloc(sizeof(*ab) + sz, flags); + if (!ab) + return -ENOMEM; + + ab->sz = sz; + sc->buf = ab; + return 0; +} + +/* Set us up to scrub an inode's extended attributes. */ +int +xchk_setup_xattr( + struct xfs_scrub *sc) +{ + int error; + + /* + * We failed to get memory while checking attrs, so this time try to + * get all the memory we're ever going to need. Allocate the buffer + * without the inode lock held, which means we can sleep. + */ + if (sc->flags & XCHK_TRY_HARDER) { + error = xchk_setup_xattr_buf(sc, XATTR_SIZE_MAX, GFP_KERNEL); + if (error) + return error; + } + + return xchk_setup_inode_contents(sc, 0); +} + +/* Extended Attributes */ + +struct xchk_xattr { + struct xfs_attr_list_context context; + struct xfs_scrub *sc; +}; + +/* + * Check that an extended attribute key can be looked up by hash. + * + * We use the XFS attribute list iterator (i.e. 
xfs_attr_list_ilocked) + * to call this function for every attribute key in an inode. Once + * we're here, we load the attribute value to see if any errors happen, + * or if we get more or less data than we expected. + */ +static void +xchk_xattr_listent( + struct xfs_attr_list_context *context, + int flags, + unsigned char *name, + int namelen, + int valuelen) +{ + struct xchk_xattr *sx; + struct xfs_da_args args = { NULL }; + int error = 0; + + sx = container_of(context, struct xchk_xattr, context); + + if (xchk_should_terminate(sx->sc, &error)) { + context->seen_enough = error; + return; + } + + if (flags & XFS_ATTR_INCOMPLETE) { + /* Incomplete attr key, just mark the inode for preening. */ + xchk_ino_set_preen(sx->sc, context->dp->i_ino); + return; + } + + /* Does this name make sense? */ + if (!xfs_attr_namecheck(name, namelen)) { + xchk_fblock_set_corrupt(sx->sc, XFS_ATTR_FORK, args.blkno); + return; + } + + /* + * Try to allocate enough memory to extract the attr value. If that + * doesn't work, we overload the seen_enough variable to convey + * the error message back to the main scrub function. + */ + error = xchk_setup_xattr_buf(sx->sc, valuelen, + GFP_KERNEL | __GFP_RETRY_MAYFAIL); + if (error == -ENOMEM) + error = -EDEADLOCK; + if (error) { + context->seen_enough = error; + return; + } + + args.op_flags = XFS_DA_OP_NOTIME; + args.attr_filter = flags & XFS_ATTR_NSP_ONDISK_MASK; + args.geo = context->dp->i_mount->m_attr_geo; + args.whichfork = XFS_ATTR_FORK; + args.dp = context->dp; + args.name = name; + args.namelen = namelen; + args.hashval = xfs_da_hashname(args.name, args.namelen); + args.trans = context->tp; + args.value = xchk_xattr_valuebuf(sx->sc); + args.valuelen = valuelen; + + error = xfs_attr_get_ilocked(&args); + /* ENODATA means the hash lookup failed and the attr is bad */ + if (error == -ENODATA) + error = -EFSCORRUPTED; + if (!xchk_fblock_process_error(sx->sc, XFS_ATTR_FORK, args.blkno, + &error)) + goto fail_xref; + if (args.valuelen != valuelen) + xchk_fblock_set_corrupt(sx->sc, XFS_ATTR_FORK, + args.blkno); +fail_xref: + if (sx->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) + context->seen_enough = 1; + return; +} + +/* + * Mark a range [start, start+len) in this map. Returns true if the + * region was free, and false if there's a conflict or a problem. + * + * Within a char, the lowest bit of the char represents the byte with + * the smallest address. + */ +STATIC bool +xchk_xattr_set_map( + struct xfs_scrub *sc, + unsigned long *map, + unsigned int start, + unsigned int len) +{ + unsigned int mapsize = sc->mp->m_attr_geo->blksize; + bool ret = true; + + if (start >= mapsize) + return false; + if (start + len > mapsize) { + len = mapsize - start; + ret = false; + } + + if (find_next_bit(map, mapsize, start) < start + len) + ret = false; + bitmap_set(map, start, len); + + return ret; +} + +/* + * Check the leaf freemap from the usage bitmap. Returns false if the + * attr freemap has problems or points to used space. + */ +STATIC bool +xchk_xattr_check_freemap( + struct xfs_scrub *sc, + unsigned long *map, + struct xfs_attr3_icleaf_hdr *leafhdr) +{ + unsigned long *freemap = xchk_xattr_freemap(sc); + unsigned long *dstmap = xchk_xattr_dstmap(sc); + unsigned int mapsize = sc->mp->m_attr_geo->blksize; + int i; + + /* Construct bitmap of freemap contents.
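+ * Each of the XFS_ATTR_LEAF_MAPSIZE freemap entries describes one run
+ * of unused bytes in the leaf block; xchk_xattr_set_map() returns
+ * false if an entry overlaps a previous one or runs off the end of
+ * the block, either of which is corruption.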
*/ + bitmap_zero(freemap, mapsize); + for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) { + if (!xchk_xattr_set_map(sc, freemap, + leafhdr->freemap[i].base, + leafhdr->freemap[i].size)) + return false; + } + + /* Look for bits that are set in freemap and are marked in use. */ + return bitmap_and(dstmap, freemap, map, mapsize) == 0; +} + +/* + * Check this leaf entry's relations to everything else. + * Returns the number of bytes used for the name/value data. + */ +STATIC void +xchk_xattr_entry( + struct xchk_da_btree *ds, + int level, + char *buf_end, + struct xfs_attr_leafblock *leaf, + struct xfs_attr3_icleaf_hdr *leafhdr, + struct xfs_attr_leaf_entry *ent, + int idx, + unsigned int *usedbytes, + __u32 *last_hashval) +{ + struct xfs_mount *mp = ds->state->mp; + unsigned long *usedmap = xchk_xattr_usedmap(ds->sc); + char *name_end; + struct xfs_attr_leaf_name_local *lentry; + struct xfs_attr_leaf_name_remote *rentry; + unsigned int nameidx; + unsigned int namesize; + + if (ent->pad2 != 0) + xchk_da_set_corrupt(ds, level); + + /* Hash values in order? */ + if (be32_to_cpu(ent->hashval) < *last_hashval) + xchk_da_set_corrupt(ds, level); + *last_hashval = be32_to_cpu(ent->hashval); + + nameidx = be16_to_cpu(ent->nameidx); + if (nameidx < leafhdr->firstused || + nameidx >= mp->m_attr_geo->blksize) { + xchk_da_set_corrupt(ds, level); + return; + } + + /* Check the name information. */ + if (ent->flags & XFS_ATTR_LOCAL) { + lentry = xfs_attr3_leaf_name_local(leaf, idx); + namesize = xfs_attr_leaf_entsize_local(lentry->namelen, + be16_to_cpu(lentry->valuelen)); + name_end = (char *)lentry + namesize; + if (lentry->namelen == 0) + xchk_da_set_corrupt(ds, level); + } else { + rentry = xfs_attr3_leaf_name_remote(leaf, idx); + namesize = xfs_attr_leaf_entsize_remote(rentry->namelen); + name_end = (char *)rentry + namesize; + if (rentry->namelen == 0 || rentry->valueblk == 0) + xchk_da_set_corrupt(ds, level); + } + if (name_end > buf_end) + xchk_da_set_corrupt(ds, level); + + if (!xchk_xattr_set_map(ds->sc, usedmap, nameidx, namesize)) + xchk_da_set_corrupt(ds, level); + if (!(ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)) + *usedbytes += namesize; +} + +/* Scrub an attribute leaf. */ +STATIC int +xchk_xattr_block( + struct xchk_da_btree *ds, + int level) +{ + struct xfs_attr3_icleaf_hdr leafhdr; + struct xfs_mount *mp = ds->state->mp; + struct xfs_da_state_blk *blk = &ds->state->path.blk[level]; + struct xfs_buf *bp = blk->bp; + xfs_dablk_t *last_checked = ds->private; + struct xfs_attr_leafblock *leaf = bp->b_addr; + struct xfs_attr_leaf_entry *ent; + struct xfs_attr_leaf_entry *entries; + unsigned long *usedmap; + char *buf_end; + size_t off; + __u32 last_hashval = 0; + unsigned int usedbytes = 0; + unsigned int hdrsize; + int i; + int error; + + if (*last_checked == blk->blkno) + return 0; + + /* Allocate memory for block usage checking. */ + error = xchk_setup_xattr_buf(ds->sc, 0, + GFP_KERNEL | __GFP_RETRY_MAYFAIL); + if (error == -ENOMEM) + return -EDEADLOCK; + if (error) + return error; + usedmap = xchk_xattr_usedmap(ds->sc); + + *last_checked = blk->blkno; + bitmap_zero(usedmap, mp->m_attr_geo->blksize); + + /* Check all the padding. 
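+ * The pad fields differ between the v5 (CRC-enabled) and v4 on-disk
+ * leaf headers, so the two layouts are checked separately below; any
+ * nonzero padding is treated as corruption.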
 */ + if (xfs_has_crc(ds->sc->mp)) { + struct xfs_attr3_leafblock *leaf = bp->b_addr; + + if (leaf->hdr.pad1 != 0 || leaf->hdr.pad2 != 0 || + leaf->hdr.info.hdr.pad != 0) + xchk_da_set_corrupt(ds, level); + } else { + if (leaf->hdr.pad1 != 0 || leaf->hdr.info.pad != 0) + xchk_da_set_corrupt(ds, level); + } + + /* Check the leaf header */ + xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &leafhdr, leaf); + hdrsize = xfs_attr3_leaf_hdr_size(leaf); + + if (leafhdr.usedbytes > mp->m_attr_geo->blksize) + xchk_da_set_corrupt(ds, level); + if (leafhdr.firstused > mp->m_attr_geo->blksize) + xchk_da_set_corrupt(ds, level); + if (leafhdr.firstused < hdrsize) + xchk_da_set_corrupt(ds, level); + if (!xchk_xattr_set_map(ds->sc, usedmap, 0, hdrsize)) + xchk_da_set_corrupt(ds, level); + + if (ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) + goto out; + + entries = xfs_attr3_leaf_entryp(leaf); + if ((char *)&entries[leafhdr.count] > (char *)leaf + leafhdr.firstused) + xchk_da_set_corrupt(ds, level); + + buf_end = (char *)bp->b_addr + mp->m_attr_geo->blksize; + for (i = 0, ent = entries; i < leafhdr.count; ent++, i++) { + /* Mark the leaf entry itself. */ + off = (char *)ent - (char *)leaf; + if (!xchk_xattr_set_map(ds->sc, usedmap, off, + sizeof(xfs_attr_leaf_entry_t))) { + xchk_da_set_corrupt(ds, level); + goto out; + } + + /* Check the entry and nameval. */ + xchk_xattr_entry(ds, level, buf_end, leaf, &leafhdr, + ent, i, &usedbytes, &last_hashval); + + if (ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) + goto out; + } + + if (!xchk_xattr_check_freemap(ds->sc, usedmap, &leafhdr)) + xchk_da_set_corrupt(ds, level); + + if (leafhdr.usedbytes != usedbytes) + xchk_da_set_corrupt(ds, level); + +out: + return 0; +} + +/* Scrub an attribute btree record. */ +STATIC int +xchk_xattr_rec( + struct xchk_da_btree *ds, + int level) +{ + struct xfs_mount *mp = ds->state->mp; + struct xfs_da_state_blk *blk = &ds->state->path.blk[level]; + struct xfs_attr_leaf_name_local *lentry; + struct xfs_attr_leaf_name_remote *rentry; + struct xfs_buf *bp; + struct xfs_attr_leaf_entry *ent; + xfs_dahash_t calc_hash; + xfs_dahash_t hash; + int nameidx; + int hdrsize; + unsigned int badflags; + int error; + + ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC); + + ent = xfs_attr3_leaf_entryp(blk->bp->b_addr) + blk->index; + + /* Check the whole block, if necessary. */ + error = xchk_xattr_block(ds, level); + if (error) + goto out; + if (ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) + goto out; + + /* Check the hash of the entry. */ + error = xchk_da_btree_hash(ds, level, &ent->hashval); + if (error) + goto out; + + /* Find the attr entry's location. */ + bp = blk->bp; + hdrsize = xfs_attr3_leaf_hdr_size(bp->b_addr); + nameidx = be16_to_cpu(ent->nameidx); + if (nameidx < hdrsize || nameidx >= mp->m_attr_geo->blksize) { + xchk_da_set_corrupt(ds, level); + goto out; + } + + /* Retrieve the entry and check it.
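+ * We recompute the hash from the local or remote name bytes with
+ * xfs_da_hashname() and require it to equal the hashval stored in the
+ * leaf entry; a mismatch would make the attr unreachable via hash
+ * lookup.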
*/ + hash = be32_to_cpu(ent->hashval); + badflags = ~(XFS_ATTR_LOCAL | XFS_ATTR_ROOT | XFS_ATTR_SECURE | + XFS_ATTR_INCOMPLETE); + if ((ent->flags & badflags) != 0) + xchk_da_set_corrupt(ds, level); + if (ent->flags & XFS_ATTR_LOCAL) { + lentry = (struct xfs_attr_leaf_name_local *) + (((char *)bp->b_addr) + nameidx); + if (lentry->namelen <= 0) { + xchk_da_set_corrupt(ds, level); + goto out; + } + calc_hash = xfs_da_hashname(lentry->nameval, lentry->namelen); + } else { + rentry = (struct xfs_attr_leaf_name_remote *) + (((char *)bp->b_addr) + nameidx); + if (rentry->namelen <= 0) { + xchk_da_set_corrupt(ds, level); + goto out; + } + calc_hash = xfs_da_hashname(rentry->name, rentry->namelen); + } + if (calc_hash != hash) + xchk_da_set_corrupt(ds, level); + +out: + return error; +} + +/* Scrub the extended attribute metadata. */ +int +xchk_xattr( + struct xfs_scrub *sc) +{ + struct xchk_xattr sx; + xfs_dablk_t last_checked = -1U; + int error = 0; + + if (!xfs_inode_hasattr(sc->ip)) + return -ENOENT; + + memset(&sx, 0, sizeof(sx)); + /* Check attribute tree structure */ + error = xchk_da_btree(sc, XFS_ATTR_FORK, xchk_xattr_rec, + &last_checked); + if (error) + goto out; + + if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) + goto out; + + /* Check that every attr key can also be looked up by hash. */ + sx.context.dp = sc->ip; + sx.context.resynch = 1; + sx.context.put_listent = xchk_xattr_listent; + sx.context.tp = sc->tp; + sx.context.allow_incomplete = true; + sx.sc = sc; + + /* + * Look up every xattr in this file by name. + * + * Use the backend implementation of xfs_attr_list to call + * xchk_xattr_listent on every attribute key in this inode. + * In other words, we use the same iterator/callback mechanism + * that listattr uses to scrub extended attributes, though in our + * _listent function, we check the value of the attribute. + * + * The VFS only locks i_rwsem when modifying attrs, so keep all + * three locks held because that's the only way to ensure we're + * the only thread poking into the da btree. We traverse the da + * btree while holding a leaf buffer locked for the xattr name + * iteration, which doesn't really follow the usual buffer + * locking order. + */ + error = xfs_attr_list_ilocked(&sx.context); + if (!xchk_fblock_process_error(sc, XFS_ATTR_FORK, 0, &error)) + goto out; + + /* Did our listent function try to return any errors? */ + if (sx.context.seen_enough < 0) + error = sx.context.seen_enough; +out: + return error; +} diff --git a/fs/xfs/scrub/attr.h b/fs/xfs/scrub/attr.h new file mode 100644 index 000000000..3590e10e3 --- /dev/null +++ b/fs/xfs/scrub/attr.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2019 Oracle. All Rights Reserved. + * Author: Darrick J. Wong + */ +#ifndef __XFS_SCRUB_ATTR_H__ +#define __XFS_SCRUB_ATTR_H__ + +/* + * Temporary storage for online scrub and repair of extended attributes. + */ +struct xchk_xattr_buf { + /* Size of @buf, in bytes. */ + size_t sz; + + /* + * Memory buffer -- either used for extracting attr values while + * walking the attributes; or for computing attr block bitmaps when + * checking the attribute tree. + * + * Each bitmap contains enough bits to track every byte in an attr + * block (rounded up to the size of an unsigned long). The attr block + * used space bitmap starts at the beginning of the buffer; the free + * space bitmap follows immediately after; and we have a third buffer + * for storing intermediate bitmap results. 
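+ *
+ * A rough sketch of the layout (names match the helpers below), where
+ * each map occupies BITS_TO_LONGS(blksize) longs:
+ *
+ *   usedmap = (unsigned long *)buf;
+ *   freemap = usedmap + BITS_TO_LONGS(blksize);
+ *   dstmap  = freemap + BITS_TO_LONGS(blksize);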
+ */ + uint8_t buf[]; +}; + +/* A place to store attribute values. */ +static inline uint8_t * +xchk_xattr_valuebuf( + struct xfs_scrub *sc) +{ + struct xchk_xattr_buf *ab = sc->buf; + + return ab->buf; +} + +/* A bitmap of space usage computed by walking an attr leaf block. */ +static inline unsigned long * +xchk_xattr_usedmap( + struct xfs_scrub *sc) +{ + struct xchk_xattr_buf *ab = sc->buf; + + return (unsigned long *)ab->buf; +} + +/* A bitmap of free space computed by walking attr leaf block free info. */ +static inline unsigned long * +xchk_xattr_freemap( + struct xfs_scrub *sc) +{ + return xchk_xattr_usedmap(sc) + + BITS_TO_LONGS(sc->mp->m_attr_geo->blksize); +} + +/* A bitmap used to hold temporary results. */ +static inline unsigned long * +xchk_xattr_dstmap( + struct xfs_scrub *sc) +{ + return xchk_xattr_freemap(sc) + + BITS_TO_LONGS(sc->mp->m_attr_geo->blksize); +} + +#endif /* __XFS_SCRUB_ATTR_H__ */ diff --git a/fs/xfs/scrub/bitmap.c b/fs/xfs/scrub/bitmap.c new file mode 100644 index 000000000..b89bf9de9 --- /dev/null +++ b/fs/xfs/scrub/bitmap.c @@ -0,0 +1,314 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2018 Oracle. All Rights Reserved. + * Author: Darrick J. Wong + */ +#include "xfs.h" +#include "xfs_fs.h" +#include "xfs_shared.h" +#include "xfs_format.h" +#include "xfs_trans_resv.h" +#include "xfs_mount.h" +#include "xfs_btree.h" +#include "scrub/bitmap.h" + +/* + * Set a range of this bitmap. Caller must ensure the range is not set. + * + * This is the logical equivalent of bitmap |= mask(start, len). + */ +int +xbitmap_set( + struct xbitmap *bitmap, + uint64_t start, + uint64_t len) +{ + struct xbitmap_range *bmr; + + bmr = kmem_alloc(sizeof(struct xbitmap_range), KM_MAYFAIL); + if (!bmr) + return -ENOMEM; + + INIT_LIST_HEAD(&bmr->list); + bmr->start = start; + bmr->len = len; + list_add_tail(&bmr->list, &bitmap->list); + + return 0; +} + +/* Free everything related to this bitmap. */ +void +xbitmap_destroy( + struct xbitmap *bitmap) +{ + struct xbitmap_range *bmr; + struct xbitmap_range *n; + + for_each_xbitmap_extent(bmr, n, bitmap) { + list_del(&bmr->list); + kmem_free(bmr); + } +} + +/* Set up a per-AG block bitmap. */ +void +xbitmap_init( + struct xbitmap *bitmap) +{ + INIT_LIST_HEAD(&bitmap->list); +} + +/* Compare two btree extents. */ +static int +xbitmap_range_cmp( + void *priv, + const struct list_head *a, + const struct list_head *b) +{ + struct xbitmap_range *ap; + struct xbitmap_range *bp; + + ap = container_of(a, struct xbitmap_range, list); + bp = container_of(b, struct xbitmap_range, list); + + if (ap->start > bp->start) + return 1; + if (ap->start < bp->start) + return -1; + return 0; +} + +/* + * Remove all the blocks mentioned in @sub from the extents in @bitmap. + * + * The intent is that callers will iterate the rmapbt for all of its records + * for a given owner to generate @bitmap; and iterate all the blocks of the + * metadata structures that are not being rebuilt and have the same rmapbt + * owner to generate @sub. This routine subtracts all the extents + * mentioned in sub from all the extents linked in @bitmap, which leaves + * @bitmap as the list of blocks that are not accounted for, which we assume + * are the dead blocks of the old metadata structure. The blocks mentioned in + * @bitmap can be reaped. + * + * This is the logical equivalent of bitmap &= ~sub. 
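+ *
+ * For example, using made-up block numbers: if @bitmap contains the
+ * single extent [64, 128) and @sub contains [96, 112), then on return
+ * @bitmap holds the two extents [64, 96) and [112, 128).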
+ */ +#define LEFT_ALIGNED (1 << 0) +#define RIGHT_ALIGNED (1 << 1) +int +xbitmap_disunion( + struct xbitmap *bitmap, + struct xbitmap *sub) +{ + struct list_head *lp; + struct xbitmap_range *br; + struct xbitmap_range *new_br; + struct xbitmap_range *sub_br; + uint64_t sub_start; + uint64_t sub_len; + int state; + int error = 0; + + if (list_empty(&bitmap->list) || list_empty(&sub->list)) + return 0; + ASSERT(!list_empty(&sub->list)); + + list_sort(NULL, &bitmap->list, xbitmap_range_cmp); + list_sort(NULL, &sub->list, xbitmap_range_cmp); + + /* + * Now that we've sorted both lists, we iterate bitmap once, rolling + * forward through sub and/or bitmap as necessary until we find an + * overlap or reach the end of either list. We do not reset lp to the + * head of bitmap nor do we reset sub_br to the head of sub. The + * list traversal is similar to merge sort, but we're deleting + * instead. In this manner we avoid O(n^2) operations. + */ + sub_br = list_first_entry(&sub->list, struct xbitmap_range, + list); + lp = bitmap->list.next; + while (lp != &bitmap->list) { + br = list_entry(lp, struct xbitmap_range, list); + + /* + * Advance sub_br and/or br until we find a pair that + * intersect or we run out of extents. + */ + while (sub_br->start + sub_br->len <= br->start) { + if (list_is_last(&sub_br->list, &sub->list)) + goto out; + sub_br = list_next_entry(sub_br, list); + } + if (sub_br->start >= br->start + br->len) { + lp = lp->next; + continue; + } + + /* trim sub_br to fit the extent we have */ + sub_start = sub_br->start; + sub_len = sub_br->len; + if (sub_br->start < br->start) { + sub_len -= br->start - sub_br->start; + sub_start = br->start; + } + if (sub_len > br->len) + sub_len = br->len; + + state = 0; + if (sub_start == br->start) + state |= LEFT_ALIGNED; + if (sub_start + sub_len == br->start + br->len) + state |= RIGHT_ALIGNED; + switch (state) { + case LEFT_ALIGNED: + /* Coincides with only the left. */ + br->start += sub_len; + br->len -= sub_len; + break; + case RIGHT_ALIGNED: + /* Coincides with only the right. */ + br->len -= sub_len; + lp = lp->next; + break; + case LEFT_ALIGNED | RIGHT_ALIGNED: + /* Total overlap, just delete ex. */ + lp = lp->next; + list_del(&br->list); + kmem_free(br); + break; + case 0: + /* + * Deleting from the middle: add the new right extent + * and then shrink the left extent. + */ + new_br = kmem_alloc(sizeof(struct xbitmap_range), + KM_MAYFAIL); + if (!new_br) { + error = -ENOMEM; + goto out; + } + INIT_LIST_HEAD(&new_br->list); + new_br->start = sub_start + sub_len; + new_br->len = br->start + br->len - new_br->start; + list_add(&new_br->list, &br->list); + br->len = sub_start - br->start; + lp = lp->next; + break; + default: + ASSERT(0); + break; + } + } + +out: + return error; +} +#undef LEFT_ALIGNED +#undef RIGHT_ALIGNED + +/* + * Record all btree blocks seen while iterating all records of a btree. + * + * We know that the btree query_all function starts at the left edge and walks + * towards the right edge of the tree. Therefore, we know that we can walk up + * the btree cursor towards the root; if the pointer for a given level points + * to the first record/key in that block, we haven't seen this block before; + * and therefore we need to remember that we saw this block in the btree. + * + * So if our btree is: + * + * 4 + * / | \ + * 1 2 3 + * + * Pretend for this example that each leaf block has 100 btree records. For + * the first btree record, we'll observe that bc_levels[0].ptr == 1, so we + * record that we saw block 1. 
Then we observe that bc_levels[1].ptr == 1, so + * we record block 4. The list is [1, 4]. + * + * For the second btree record, we see that bc_levels[0].ptr == 2, so we exit + * the loop. The list remains [1, 4]. + * + * For the 101st btree record, we've moved onto leaf block 2. Now + * bc_levels[0].ptr == 1 again, so we record that we saw block 2. We see that + * bc_levels[1].ptr == 2, so we exit the loop. The list is now [1, 4, 2]. + * + * For the 102nd record, bc_levels[0].ptr == 2, so we continue. + * + * For the 201st record, we've moved on to leaf block 3. + * bc_levels[0].ptr == 1, so we add 3 to the list. Now it is [1, 4, 2, 3]. + * + * For the 300th record we just exit, with the list being [1, 4, 2, 3]. + */ + +/* + * Record all the buffers pointed to by the btree cursor. Callers already + * engaged in a btree walk should call this function to capture the list of + * blocks going from the leaf towards the root. + */ +int +xbitmap_set_btcur_path( + struct xbitmap *bitmap, + struct xfs_btree_cur *cur) +{ + struct xfs_buf *bp; + xfs_fsblock_t fsb; + int i; + int error; + + for (i = 0; i < cur->bc_nlevels && cur->bc_levels[i].ptr == 1; i++) { + xfs_btree_get_block(cur, i, &bp); + if (!bp) + continue; + fsb = XFS_DADDR_TO_FSB(cur->bc_mp, xfs_buf_daddr(bp)); + error = xbitmap_set(bitmap, fsb, 1); + if (error) + return error; + } + + return 0; +} + +/* Collect a btree's block in the bitmap. */ +STATIC int +xbitmap_collect_btblock( + struct xfs_btree_cur *cur, + int level, + void *priv) +{ + struct xbitmap *bitmap = priv; + struct xfs_buf *bp; + xfs_fsblock_t fsbno; + + xfs_btree_get_block(cur, level, &bp); + if (!bp) + return 0; + + fsbno = XFS_DADDR_TO_FSB(cur->bc_mp, xfs_buf_daddr(bp)); + return xbitmap_set(bitmap, fsbno, 1); +} + +/* Walk the btree and mark the bitmap wherever a btree block is found. */ +int +xbitmap_set_btblocks( + struct xbitmap *bitmap, + struct xfs_btree_cur *cur) +{ + return xfs_btree_visit_blocks(cur, xbitmap_collect_btblock, + XFS_BTREE_VISIT_ALL, bitmap); +} + +/* How many bits are set in this bitmap? */ +uint64_t +xbitmap_hweight( + struct xbitmap *bitmap) +{ + struct xbitmap_range *bmr; + struct xbitmap_range *n; + uint64_t ret = 0; + + for_each_xbitmap_extent(bmr, n, bitmap) + ret += bmr->len; + + return ret; +} diff --git a/fs/xfs/scrub/bitmap.h b/fs/xfs/scrub/bitmap.h new file mode 100644 index 000000000..900646b72 --- /dev/null +++ b/fs/xfs/scrub/bitmap.h @@ -0,0 +1,37 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2018 Oracle. All Rights Reserved. + * Author: Darrick J. 
Wong + */ +#ifndef __XFS_SCRUB_BITMAP_H__ +#define __XFS_SCRUB_BITMAP_H__ + +struct xbitmap_range { + struct list_head list; + uint64_t start; + uint64_t len; +}; + +struct xbitmap { + struct list_head list; +}; + +void xbitmap_init(struct xbitmap *bitmap); +void xbitmap_destroy(struct xbitmap *bitmap); + +#define for_each_xbitmap_extent(bex, n, bitmap) \ + list_for_each_entry_safe((bex), (n), &(bitmap)->list, list) + +#define for_each_xbitmap_block(b, bex, n, bitmap) \ + list_for_each_entry_safe((bex), (n), &(bitmap)->list, list) \ + for ((b) = (bex)->start; (b) < (bex)->start + (bex)->len; (b)++) + +int xbitmap_set(struct xbitmap *bitmap, uint64_t start, uint64_t len); +int xbitmap_disunion(struct xbitmap *bitmap, struct xbitmap *sub); +int xbitmap_set_btcur_path(struct xbitmap *bitmap, + struct xfs_btree_cur *cur); +int xbitmap_set_btblocks(struct xbitmap *bitmap, + struct xfs_btree_cur *cur); +uint64_t xbitmap_hweight(struct xbitmap *bitmap); + +#endif /* __XFS_SCRUB_BITMAP_H__ */ diff --git a/fs/xfs/scrub/bmap.c b/fs/xfs/scrub/bmap.c new file mode 100644 index 000000000..f0b9cb650 --- /dev/null +++ b/fs/xfs/scrub/bmap.c @@ -0,0 +1,741 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2017 Oracle. All Rights Reserved. + * Author: Darrick J. Wong + */ +#include "xfs.h" +#include "xfs_fs.h" +#include "xfs_shared.h" +#include "xfs_format.h" +#include "xfs_trans_resv.h" +#include "xfs_mount.h" +#include "xfs_btree.h" +#include "xfs_bit.h" +#include "xfs_log_format.h" +#include "xfs_trans.h" +#include "xfs_inode.h" +#include "xfs_alloc.h" +#include "xfs_bmap.h" +#include "xfs_bmap_btree.h" +#include "xfs_rmap.h" +#include "xfs_rmap_btree.h" +#include "scrub/scrub.h" +#include "scrub/common.h" +#include "scrub/btree.h" +#include "xfs_ag.h" + +/* Set us up with an inode's bmap. */ +int +xchk_setup_inode_bmap( + struct xfs_scrub *sc) +{ + int error; + + error = xchk_get_inode(sc); + if (error) + goto out; + + sc->ilock_flags = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL; + xfs_ilock(sc->ip, sc->ilock_flags); + + /* + * We don't want any ephemeral data fork updates sitting around + * while we inspect block mappings, so wait for directio to finish + * and flush dirty data if we have delalloc reservations. + */ + if (S_ISREG(VFS_I(sc->ip)->i_mode) && + sc->sm->sm_type == XFS_SCRUB_TYPE_BMBTD) { + struct address_space *mapping = VFS_I(sc->ip)->i_mapping; + + inode_dio_wait(VFS_I(sc->ip)); + + /* + * Try to flush all incore state to disk before we examine the + * space mappings for the data fork. Leave accumulated errors + * in the mapping for the writer threads to consume. + * + * On ENOSPC or EIO writeback errors, we continue into the + * extent mapping checks because write failures do not + * necessarily imply anything about the correctness of the file + * metadata. The metadata and the file data could be on + * completely separate devices; a media failure might only + * affect a subset of the disk, etc. We can handle delalloc + * extents in the scrubber, so leaving them in memory is fine. + */ + error = filemap_fdatawrite(mapping); + if (!error) + error = filemap_fdatawait_keep_errors(mapping); + if (error && (error != -ENOSPC && error != -EIO)) + goto out; + } + + /* Got the inode, lock it and we're ready to go. */ + error = xchk_trans_alloc(sc, 0); + if (error) + goto out; + sc->ilock_flags |= XFS_ILOCK_EXCL; + xfs_ilock(sc->ip, XFS_ILOCK_EXCL); + +out: + /* scrub teardown will unlock and release the inode */ + return error; +} + +/* + * Inode fork block mapping (BMBT) scrubber. 
+ * More complex than the others because we have to scrub + * all the extents regardless of whether or not the fork + * is in btree format. + */ + +struct xchk_bmap_info { + struct xfs_scrub *sc; + xfs_fileoff_t lastoff; + bool is_rt; + bool is_shared; + bool was_loaded; + int whichfork; +}; + +/* Look for a corresponding rmap for this irec. */ +static inline bool +xchk_bmap_get_rmap( + struct xchk_bmap_info *info, + struct xfs_bmbt_irec *irec, + xfs_agblock_t agbno, + uint64_t owner, + struct xfs_rmap_irec *rmap) +{ + xfs_fileoff_t offset; + unsigned int rflags = 0; + int has_rmap; + int error; + + if (info->whichfork == XFS_ATTR_FORK) + rflags |= XFS_RMAP_ATTR_FORK; + if (irec->br_state == XFS_EXT_UNWRITTEN) + rflags |= XFS_RMAP_UNWRITTEN; + + /* + * CoW staging extents are owned (on disk) by the refcountbt, so + * their rmaps do not have offsets. + */ + if (info->whichfork == XFS_COW_FORK) + offset = 0; + else + offset = irec->br_startoff; + + /* + * If the caller thinks this could be a shared bmbt extent (IOWs, + * any data fork extent of a reflink inode) then we have to use the + * range rmap lookup to make sure we get the correct owner/offset. + */ + if (info->is_shared) { + error = xfs_rmap_lookup_le_range(info->sc->sa.rmap_cur, agbno, + owner, offset, rflags, rmap, &has_rmap); + } else { + error = xfs_rmap_lookup_le(info->sc->sa.rmap_cur, agbno, + owner, offset, rflags, rmap, &has_rmap); + } + if (!xchk_should_check_xref(info->sc, &error, &info->sc->sa.rmap_cur)) + return false; + + if (!has_rmap) + xchk_fblock_xref_set_corrupt(info->sc, info->whichfork, + irec->br_startoff); + return has_rmap; +} + +/* Make sure that we have rmapbt records for this extent. */ +STATIC void +xchk_bmap_xref_rmap( + struct xchk_bmap_info *info, + struct xfs_bmbt_irec *irec, + xfs_agblock_t agbno) +{ + struct xfs_rmap_irec rmap; + unsigned long long rmap_end; + uint64_t owner; + + if (!info->sc->sa.rmap_cur || xchk_skip_xref(info->sc->sm)) + return; + + if (info->whichfork == XFS_COW_FORK) + owner = XFS_RMAP_OWN_COW; + else + owner = info->sc->ip->i_ino; + + /* Find the rmap record for this irec. */ + if (!xchk_bmap_get_rmap(info, irec, agbno, owner, &rmap)) + return; + + /* Check the rmap. */ + rmap_end = (unsigned long long)rmap.rm_startblock + rmap.rm_blockcount; + if (rmap.rm_startblock > agbno || + agbno + irec->br_blockcount > rmap_end) + xchk_fblock_xref_set_corrupt(info->sc, info->whichfork, + irec->br_startoff); + + /* + * Check the logical offsets if applicable. CoW staging extents + * don't track logical offsets since the mappings only exist in + * memory. + */ + if (info->whichfork != XFS_COW_FORK) { + rmap_end = (unsigned long long)rmap.rm_offset + + rmap.rm_blockcount; + if (rmap.rm_offset > irec->br_startoff || + irec->br_startoff + irec->br_blockcount > rmap_end) + xchk_fblock_xref_set_corrupt(info->sc, + info->whichfork, irec->br_startoff); + } + + if (rmap.rm_owner != owner) + xchk_fblock_xref_set_corrupt(info->sc, info->whichfork, + irec->br_startoff); + + /* + * Check for discrepancies between the unwritten flag in the irec and + * the rmap. Note that the (in-memory) CoW fork distinguishes between + * unwritten and written extents, but we don't track that in the rmap + * records because the blocks are owned (on-disk) by the refcountbt, + * which doesn't track unwritten state. 
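+ * Hence the check below is skipped entirely when the rmap owner is
+ * XFS_RMAP_OWN_COW.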
+ */ + if (owner != XFS_RMAP_OWN_COW && + !!(irec->br_state == XFS_EXT_UNWRITTEN) != + !!(rmap.rm_flags & XFS_RMAP_UNWRITTEN)) + xchk_fblock_xref_set_corrupt(info->sc, info->whichfork, + irec->br_startoff); + + if (!!(info->whichfork == XFS_ATTR_FORK) != + !!(rmap.rm_flags & XFS_RMAP_ATTR_FORK)) + xchk_fblock_xref_set_corrupt(info->sc, info->whichfork, + irec->br_startoff); + if (rmap.rm_flags & XFS_RMAP_BMBT_BLOCK) + xchk_fblock_xref_set_corrupt(info->sc, info->whichfork, + irec->br_startoff); +} + +/* Cross-reference a single rtdev extent record. */ +STATIC void +xchk_bmap_rt_iextent_xref( + struct xfs_inode *ip, + struct xchk_bmap_info *info, + struct xfs_bmbt_irec *irec) +{ + xchk_xref_is_used_rt_space(info->sc, irec->br_startblock, + irec->br_blockcount); +} + +/* Cross-reference a single datadev extent record. */ +STATIC void +xchk_bmap_iextent_xref( + struct xfs_inode *ip, + struct xchk_bmap_info *info, + struct xfs_bmbt_irec *irec) +{ + struct xfs_mount *mp = info->sc->mp; + xfs_agnumber_t agno; + xfs_agblock_t agbno; + xfs_extlen_t len; + int error; + + agno = XFS_FSB_TO_AGNO(mp, irec->br_startblock); + agbno = XFS_FSB_TO_AGBNO(mp, irec->br_startblock); + len = irec->br_blockcount; + + error = xchk_ag_init_existing(info->sc, agno, &info->sc->sa); + if (!xchk_fblock_process_error(info->sc, info->whichfork, + irec->br_startoff, &error)) + goto out_free; + + xchk_xref_is_used_space(info->sc, agbno, len); + xchk_xref_is_not_inode_chunk(info->sc, agbno, len); + xchk_bmap_xref_rmap(info, irec, agbno); + switch (info->whichfork) { + case XFS_DATA_FORK: + if (xfs_is_reflink_inode(info->sc->ip)) + break; + fallthrough; + case XFS_ATTR_FORK: + xchk_xref_is_not_shared(info->sc, agbno, + irec->br_blockcount); + break; + case XFS_COW_FORK: + xchk_xref_is_cow_staging(info->sc, agbno, + irec->br_blockcount); + break; + } + +out_free: + xchk_ag_free(info->sc, &info->sc->sa); +} + +/* + * Directories and attr forks should never have blocks that can't be addressed + * by a xfs_dablk_t. + */ +STATIC void +xchk_bmap_dirattr_extent( + struct xfs_inode *ip, + struct xchk_bmap_info *info, + struct xfs_bmbt_irec *irec) +{ + struct xfs_mount *mp = ip->i_mount; + xfs_fileoff_t off; + + if (!S_ISDIR(VFS_I(ip)->i_mode) && info->whichfork != XFS_ATTR_FORK) + return; + + if (!xfs_verify_dablk(mp, irec->br_startoff)) + xchk_fblock_set_corrupt(info->sc, info->whichfork, + irec->br_startoff); + + off = irec->br_startoff + irec->br_blockcount - 1; + if (!xfs_verify_dablk(mp, off)) + xchk_fblock_set_corrupt(info->sc, info->whichfork, off); +} + +/* Scrub a single extent record. */ +STATIC int +xchk_bmap_iextent( + struct xfs_inode *ip, + struct xchk_bmap_info *info, + struct xfs_bmbt_irec *irec) +{ + struct xfs_mount *mp = info->sc->mp; + int error = 0; + + /* + * Check for out-of-order extents. This record could have come + * from the incore list, for which there is no ordering check. + */ + if (irec->br_startoff < info->lastoff) + xchk_fblock_set_corrupt(info->sc, info->whichfork, + irec->br_startoff); + + if (!xfs_verify_fileext(mp, irec->br_startoff, irec->br_blockcount)) + xchk_fblock_set_corrupt(info->sc, info->whichfork, + irec->br_startoff); + + xchk_bmap_dirattr_extent(ip, info, irec); + + /* There should never be a "hole" extent in either extent list. */ + if (irec->br_startblock == HOLESTARTBLOCK) + xchk_fblock_set_corrupt(info->sc, info->whichfork, + irec->br_startoff); + + /* + * Check for delalloc extents. 
We never iterate the ones in the + * in-core extent scan, and we should never see these in the bmbt. + */ + if (isnullstartblock(irec->br_startblock)) + xchk_fblock_set_corrupt(info->sc, info->whichfork, + irec->br_startoff); + + /* Make sure the extent points to a valid place. */ + if (irec->br_blockcount > XFS_MAX_BMBT_EXTLEN) + xchk_fblock_set_corrupt(info->sc, info->whichfork, + irec->br_startoff); + if (info->is_rt && + !xfs_verify_rtext(mp, irec->br_startblock, irec->br_blockcount)) + xchk_fblock_set_corrupt(info->sc, info->whichfork, + irec->br_startoff); + if (!info->is_rt && + !xfs_verify_fsbext(mp, irec->br_startblock, irec->br_blockcount)) + xchk_fblock_set_corrupt(info->sc, info->whichfork, + irec->br_startoff); + + /* We don't allow unwritten extents on attr forks. */ + if (irec->br_state == XFS_EXT_UNWRITTEN && + info->whichfork == XFS_ATTR_FORK) + xchk_fblock_set_corrupt(info->sc, info->whichfork, + irec->br_startoff); + + if (info->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) + return 0; + + if (info->is_rt) + xchk_bmap_rt_iextent_xref(ip, info, irec); + else + xchk_bmap_iextent_xref(ip, info, irec); + + info->lastoff = irec->br_startoff + irec->br_blockcount; + return error; +} + +/* Scrub a bmbt record. */ +STATIC int +xchk_bmapbt_rec( + struct xchk_btree *bs, + const union xfs_btree_rec *rec) +{ + struct xfs_bmbt_irec irec; + struct xfs_bmbt_irec iext_irec; + struct xfs_iext_cursor icur; + struct xchk_bmap_info *info = bs->private; + struct xfs_inode *ip = bs->cur->bc_ino.ip; + struct xfs_buf *bp = NULL; + struct xfs_btree_block *block; + struct xfs_ifork *ifp = xfs_ifork_ptr(ip, info->whichfork); + uint64_t owner; + int i; + + /* + * Check the owners of the btree blocks up to the level below + * the root since the verifiers don't do that. + */ + if (xfs_has_crc(bs->cur->bc_mp) && + bs->cur->bc_levels[0].ptr == 1) { + for (i = 0; i < bs->cur->bc_nlevels - 1; i++) { + block = xfs_btree_get_block(bs->cur, i, &bp); + owner = be64_to_cpu(block->bb_u.l.bb_owner); + if (owner != ip->i_ino) + xchk_fblock_set_corrupt(bs->sc, + info->whichfork, 0); + } + } + + /* + * Check that the incore extent tree contains an extent that matches + * this one exactly. We validate those cached bmaps later, so we don't + * need to check them here. If the incore extent tree was just loaded + * from disk by the scrubber, we assume that its contents match what's + * on disk (we still hold the ILOCK) and skip the equivalence check. + */ + if (!info->was_loaded) + return 0; + + xfs_bmbt_disk_get_all(&rec->bmbt, &irec); + if (!xfs_iext_lookup_extent(ip, ifp, irec.br_startoff, &icur, + &iext_irec) || + irec.br_startoff != iext_irec.br_startoff || + irec.br_startblock != iext_irec.br_startblock || + irec.br_blockcount != iext_irec.br_blockcount || + irec.br_state != iext_irec.br_state) + xchk_fblock_set_corrupt(bs->sc, info->whichfork, + irec.br_startoff); + return 0; +} + +/* Scan the btree records. */ +STATIC int +xchk_bmap_btree( + struct xfs_scrub *sc, + int whichfork, + struct xchk_bmap_info *info) +{ + struct xfs_owner_info oinfo; + struct xfs_ifork *ifp = xfs_ifork_ptr(sc->ip, whichfork); + struct xfs_mount *mp = sc->mp; + struct xfs_inode *ip = sc->ip; + struct xfs_btree_cur *cur; + int error; + + /* Load the incore bmap cache if it's not loaded. */ + info->was_loaded = !xfs_need_iread_extents(ifp); + + error = xfs_iread_extents(sc->tp, ip, whichfork); + if (!xchk_fblock_process_error(sc, whichfork, 0, &error)) + goto out; + + /* Check the btree structure. 
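+ * xchk_btree() walks every block of the bmbt, checking key and record
+ * ordering as it goes, and invokes xchk_bmapbt_rec() on each record
+ * with @info as its private argument.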
*/ + cur = xfs_bmbt_init_cursor(mp, sc->tp, ip, whichfork); + xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork); + error = xchk_btree(sc, cur, xchk_bmapbt_rec, &oinfo, info); + xfs_btree_del_cursor(cur, error); +out: + return error; +} + +struct xchk_bmap_check_rmap_info { + struct xfs_scrub *sc; + int whichfork; + struct xfs_iext_cursor icur; +}; + +/* Can we find bmaps that fit this rmap? */ +STATIC int +xchk_bmap_check_rmap( + struct xfs_btree_cur *cur, + const struct xfs_rmap_irec *rec, + void *priv) +{ + struct xfs_bmbt_irec irec; + struct xfs_rmap_irec check_rec; + struct xchk_bmap_check_rmap_info *sbcri = priv; + struct xfs_ifork *ifp; + struct xfs_scrub *sc = sbcri->sc; + bool have_map; + + /* Is this even the right fork? */ + if (rec->rm_owner != sc->ip->i_ino) + return 0; + if ((sbcri->whichfork == XFS_ATTR_FORK) ^ + !!(rec->rm_flags & XFS_RMAP_ATTR_FORK)) + return 0; + if (rec->rm_flags & XFS_RMAP_BMBT_BLOCK) + return 0; + + /* Now look up the bmbt record. */ + ifp = xfs_ifork_ptr(sc->ip, sbcri->whichfork); + if (!ifp) { + xchk_fblock_set_corrupt(sc, sbcri->whichfork, + rec->rm_offset); + goto out; + } + have_map = xfs_iext_lookup_extent(sc->ip, ifp, rec->rm_offset, + &sbcri->icur, &irec); + if (!have_map) + xchk_fblock_set_corrupt(sc, sbcri->whichfork, + rec->rm_offset); + /* + * bmap extent record lengths are constrained to 2^21 blocks in length + * because of space constraints in the on-disk metadata structure. + * However, rmap extent record lengths are constrained only by AG + * length, so we have to loop through the bmbt to make sure that the + * entire rmap is covered by bmbt records. + */ + check_rec = *rec; + while (have_map) { + if (irec.br_startoff != check_rec.rm_offset) + xchk_fblock_set_corrupt(sc, sbcri->whichfork, + check_rec.rm_offset); + if (irec.br_startblock != XFS_AGB_TO_FSB(sc->mp, + cur->bc_ag.pag->pag_agno, + check_rec.rm_startblock)) + xchk_fblock_set_corrupt(sc, sbcri->whichfork, + check_rec.rm_offset); + if (irec.br_blockcount > check_rec.rm_blockcount) + xchk_fblock_set_corrupt(sc, sbcri->whichfork, + check_rec.rm_offset); + if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) + break; + check_rec.rm_startblock += irec.br_blockcount; + check_rec.rm_offset += irec.br_blockcount; + check_rec.rm_blockcount -= irec.br_blockcount; + if (check_rec.rm_blockcount == 0) + break; + have_map = xfs_iext_next_extent(ifp, &sbcri->icur, &irec); + if (!have_map) + xchk_fblock_set_corrupt(sc, sbcri->whichfork, + check_rec.rm_offset); + } + +out: + if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) + return -ECANCELED; + return 0; +} + +/* Make sure each rmap has a corresponding bmbt entry. */ +STATIC int +xchk_bmap_check_ag_rmaps( + struct xfs_scrub *sc, + int whichfork, + struct xfs_perag *pag) +{ + struct xchk_bmap_check_rmap_info sbcri; + struct xfs_btree_cur *cur; + struct xfs_buf *agf; + int error; + + error = xfs_alloc_read_agf(pag, sc->tp, 0, &agf); + if (error) + return error; + + cur = xfs_rmapbt_init_cursor(sc->mp, sc->tp, agf, pag); + + sbcri.sc = sc; + sbcri.whichfork = whichfork; + error = xfs_rmap_query_all(cur, xchk_bmap_check_rmap, &sbcri); + if (error == -ECANCELED) + error = 0; + + xfs_btree_del_cursor(cur, error); + xfs_trans_brelse(sc->tp, agf); + return error; +} + +/* Make sure each rmap has a corresponding bmbt entry. 
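+ * An inode's mappings can live in any AG, so we have to run the
+ * per-AG check against the rmapbt of every AG, stopping early if we
+ * hit an error or find corruption.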
*/ +STATIC int +xchk_bmap_check_rmaps( + struct xfs_scrub *sc, + int whichfork) +{ + struct xfs_ifork *ifp = xfs_ifork_ptr(sc->ip, whichfork); + struct xfs_perag *pag; + xfs_agnumber_t agno; + bool zero_size; + int error; + + if (!xfs_has_rmapbt(sc->mp) || + whichfork == XFS_COW_FORK || + (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)) + return 0; + + /* Don't support realtime rmap checks yet. */ + if (XFS_IS_REALTIME_INODE(sc->ip) && whichfork == XFS_DATA_FORK) + return 0; + + ASSERT(xfs_ifork_ptr(sc->ip, whichfork) != NULL); + + /* + * Only do this for complex maps that are in btree format, or for + * situations where we would seem to have a size but zero extents. + * The inode repair code can zap broken iforks, which means we have + * to flag this bmap as corrupt if there are rmaps that need to be + * reattached. + */ + + if (whichfork == XFS_DATA_FORK) + zero_size = i_size_read(VFS_I(sc->ip)) == 0; + else + zero_size = false; + + if (ifp->if_format != XFS_DINODE_FMT_BTREE && + (zero_size || ifp->if_nextents > 0)) + return 0; + + for_each_perag(sc->mp, agno, pag) { + error = xchk_bmap_check_ag_rmaps(sc, whichfork, pag); + if (error) + break; + if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) + break; + } + if (pag) + xfs_perag_put(pag); + return error; +} + +/* + * Scrub an inode fork's block mappings. + * + * First we scan every record in every btree block, if applicable. + * Then we unconditionally scan the incore extent cache. + */ +STATIC int +xchk_bmap( + struct xfs_scrub *sc, + int whichfork) +{ + struct xfs_bmbt_irec irec; + struct xchk_bmap_info info = { NULL }; + struct xfs_mount *mp = sc->mp; + struct xfs_inode *ip = sc->ip; + struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork); + xfs_fileoff_t endoff; + struct xfs_iext_cursor icur; + int error = 0; + + /* Non-existent forks can be ignored. */ + if (!ifp) + goto out; + + info.is_rt = whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip); + info.whichfork = whichfork; + info.is_shared = whichfork == XFS_DATA_FORK && xfs_is_reflink_inode(ip); + info.sc = sc; + + switch (whichfork) { + case XFS_COW_FORK: + /* No CoW forks on non-reflink inodes/filesystems. */ + if (!xfs_is_reflink_inode(ip)) { + xchk_ino_set_corrupt(sc, sc->ip->i_ino); + goto out; + } + break; + case XFS_ATTR_FORK: + if (!xfs_has_attr(mp) && !xfs_has_attr2(mp)) + xchk_ino_set_corrupt(sc, sc->ip->i_ino); + break; + default: + ASSERT(whichfork == XFS_DATA_FORK); + break; + } + + /* Check the fork values */ + switch (ifp->if_format) { + case XFS_DINODE_FMT_UUID: + case XFS_DINODE_FMT_DEV: + case XFS_DINODE_FMT_LOCAL: + /* No mappings to check. */ + goto out; + case XFS_DINODE_FMT_EXTENTS: + break; + case XFS_DINODE_FMT_BTREE: + if (whichfork == XFS_COW_FORK) { + xchk_fblock_set_corrupt(sc, whichfork, 0); + goto out; + } + + error = xchk_bmap_btree(sc, whichfork, &info); + if (error) + goto out; + break; + default: + xchk_fblock_set_corrupt(sc, whichfork, 0); + goto out; + } + + if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) + goto out; + + /* Find the offset of the last extent in the mapping. */ + error = xfs_bmap_last_offset(ip, &endoff, whichfork); + if (!xchk_fblock_process_error(sc, whichfork, 0, &error)) + goto out; + + /* Scrub extent records. 
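+ * Delalloc mappings are skipped here, and any mapping that starts at
+ * or beyond the last offset computed above is flagged as corrupt.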
*/ + info.lastoff = 0; + ifp = xfs_ifork_ptr(ip, whichfork); + for_each_xfs_iext(ifp, &icur, &irec) { + if (xchk_should_terminate(sc, &error) || + (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)) + goto out; + if (isnullstartblock(irec.br_startblock)) + continue; + if (irec.br_startoff >= endoff) { + xchk_fblock_set_corrupt(sc, whichfork, + irec.br_startoff); + goto out; + } + error = xchk_bmap_iextent(ip, &info, &irec); + if (error) + goto out; + } + + error = xchk_bmap_check_rmaps(sc, whichfork); + if (!xchk_fblock_xref_process_error(sc, whichfork, 0, &error)) + goto out; +out: + return error; +} + +/* Scrub an inode's data fork. */ +int +xchk_bmap_data( + struct xfs_scrub *sc) +{ + return xchk_bmap(sc, XFS_DATA_FORK); +} + +/* Scrub an inode's attr fork. */ +int +xchk_bmap_attr( + struct xfs_scrub *sc) +{ + return xchk_bmap(sc, XFS_ATTR_FORK); +} + +/* Scrub an inode's CoW fork. */ +int +xchk_bmap_cow( + struct xfs_scrub *sc) +{ + if (!xfs_is_reflink_inode(sc->ip)) + return -ENOENT; + + return xchk_bmap(sc, XFS_COW_FORK); +} diff --git a/fs/xfs/scrub/btree.c b/fs/xfs/scrub/btree.c new file mode 100644 index 000000000..2f4519590 --- /dev/null +++ b/fs/xfs/scrub/btree.c @@ -0,0 +1,748 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2017 Oracle. All Rights Reserved. + * Author: Darrick J. Wong + */ +#include "xfs.h" +#include "xfs_fs.h" +#include "xfs_shared.h" +#include "xfs_format.h" +#include "xfs_trans_resv.h" +#include "xfs_mount.h" +#include "xfs_inode.h" +#include "xfs_btree.h" +#include "scrub/scrub.h" +#include "scrub/common.h" +#include "scrub/btree.h" +#include "scrub/trace.h" + +/* btree scrubbing */ + +/* + * Check for btree operation errors. See the section about handling + * operational errors in common.c. + */ +static bool +__xchk_btree_process_error( + struct xfs_scrub *sc, + struct xfs_btree_cur *cur, + int level, + int *error, + __u32 errflag, + void *ret_ip) +{ + if (*error == 0) + return true; + + switch (*error) { + case -EDEADLOCK: + /* Used to restart an op with deadlock avoidance. */ + trace_xchk_deadlock_retry(sc->ip, sc->sm, *error); + break; + case -EFSBADCRC: + case -EFSCORRUPTED: + /* Note the badness but don't abort. */ + sc->sm->sm_flags |= errflag; + *error = 0; + fallthrough; + default: + if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) + trace_xchk_ifork_btree_op_error(sc, cur, level, + *error, ret_ip); + else + trace_xchk_btree_op_error(sc, cur, level, + *error, ret_ip); + break; + } + return false; +} + +bool +xchk_btree_process_error( + struct xfs_scrub *sc, + struct xfs_btree_cur *cur, + int level, + int *error) +{ + return __xchk_btree_process_error(sc, cur, level, error, + XFS_SCRUB_OFLAG_CORRUPT, __return_address); +} + +bool +xchk_btree_xref_process_error( + struct xfs_scrub *sc, + struct xfs_btree_cur *cur, + int level, + int *error) +{ + return __xchk_btree_process_error(sc, cur, level, error, + XFS_SCRUB_OFLAG_XFAIL, __return_address); +} + +/* Record btree block corruption. 
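+ * The wrappers below differ only in the errflag they set:
+ * XFS_SCRUB_OFLAG_CORRUPT for a problem in the btree being scrubbed,
+ * or XFS_SCRUB_OFLAG_XCORRUPT when a cross-referenced structure
+ * disagrees with it.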
*/ +static void +__xchk_btree_set_corrupt( + struct xfs_scrub *sc, + struct xfs_btree_cur *cur, + int level, + __u32 errflag, + void *ret_ip) +{ + sc->sm->sm_flags |= errflag; + + if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) + trace_xchk_ifork_btree_error(sc, cur, level, + ret_ip); + else + trace_xchk_btree_error(sc, cur, level, + ret_ip); +} + +void +xchk_btree_set_corrupt( + struct xfs_scrub *sc, + struct xfs_btree_cur *cur, + int level) +{ + __xchk_btree_set_corrupt(sc, cur, level, XFS_SCRUB_OFLAG_CORRUPT, + __return_address); +} + +void +xchk_btree_xref_set_corrupt( + struct xfs_scrub *sc, + struct xfs_btree_cur *cur, + int level) +{ + __xchk_btree_set_corrupt(sc, cur, level, XFS_SCRUB_OFLAG_XCORRUPT, + __return_address); +} + +/* + * Make sure this record is in order and doesn't stray outside of the parent + * keys. + */ +STATIC void +xchk_btree_rec( + struct xchk_btree *bs) +{ + struct xfs_btree_cur *cur = bs->cur; + union xfs_btree_rec *rec; + union xfs_btree_key key; + union xfs_btree_key hkey; + union xfs_btree_key *keyp; + struct xfs_btree_block *block; + struct xfs_btree_block *keyblock; + struct xfs_buf *bp; + + block = xfs_btree_get_block(cur, 0, &bp); + rec = xfs_btree_rec_addr(cur, cur->bc_levels[0].ptr, block); + + trace_xchk_btree_rec(bs->sc, cur, 0); + + /* If this isn't the first record, are they in order? */ + if (cur->bc_levels[0].ptr > 1 && + !cur->bc_ops->recs_inorder(cur, &bs->lastrec, rec)) + xchk_btree_set_corrupt(bs->sc, cur, 0); + memcpy(&bs->lastrec, rec, cur->bc_ops->rec_len); + + if (cur->bc_nlevels == 1) + return; + + /* Is this at least as large as the parent low key? */ + cur->bc_ops->init_key_from_rec(&key, rec); + keyblock = xfs_btree_get_block(cur, 1, &bp); + keyp = xfs_btree_key_addr(cur, cur->bc_levels[1].ptr, keyblock); + if (cur->bc_ops->diff_two_keys(cur, &key, keyp) < 0) + xchk_btree_set_corrupt(bs->sc, cur, 1); + + if (!(cur->bc_flags & XFS_BTREE_OVERLAPPING)) + return; + + /* Is this no larger than the parent high key? */ + cur->bc_ops->init_high_key_from_rec(&hkey, rec); + keyp = xfs_btree_high_key_addr(cur, cur->bc_levels[1].ptr, keyblock); + if (cur->bc_ops->diff_two_keys(cur, keyp, &hkey) < 0) + xchk_btree_set_corrupt(bs->sc, cur, 1); +} + +/* + * Make sure this key is in order and doesn't stray outside of the parent + * keys. + */ +STATIC void +xchk_btree_key( + struct xchk_btree *bs, + int level) +{ + struct xfs_btree_cur *cur = bs->cur; + union xfs_btree_key *key; + union xfs_btree_key *keyp; + struct xfs_btree_block *block; + struct xfs_btree_block *keyblock; + struct xfs_buf *bp; + + block = xfs_btree_get_block(cur, level, &bp); + key = xfs_btree_key_addr(cur, cur->bc_levels[level].ptr, block); + + trace_xchk_btree_key(bs->sc, cur, level); + + /* If this isn't the first key, are they in order? */ + if (cur->bc_levels[level].ptr > 1 && + !cur->bc_ops->keys_inorder(cur, &bs->lastkey[level - 1], key)) + xchk_btree_set_corrupt(bs->sc, cur, level); + memcpy(&bs->lastkey[level - 1], key, cur->bc_ops->key_len); + + if (level + 1 >= cur->bc_nlevels) + return; + + /* Is this at least as large as the parent low key? */ + keyblock = xfs_btree_get_block(cur, level + 1, &bp); + keyp = xfs_btree_key_addr(cur, cur->bc_levels[level + 1].ptr, keyblock); + if (cur->bc_ops->diff_two_keys(cur, key, keyp) < 0) + xchk_btree_set_corrupt(bs->sc, cur, level); + + if (!(cur->bc_flags & XFS_BTREE_OVERLAPPING)) + return; + + /* Is this no larger than the parent high key? 
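+ * (Only overlapping btrees such as the rmapbt store high keys, which
+ * is why we returned early above for everything else.)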
 */ + key = xfs_btree_high_key_addr(cur, cur->bc_levels[level].ptr, block); + keyp = xfs_btree_high_key_addr(cur, cur->bc_levels[level + 1].ptr, + keyblock); + if (cur->bc_ops->diff_two_keys(cur, keyp, key) < 0) + xchk_btree_set_corrupt(bs->sc, cur, level); +} + +/* + * Check a btree pointer. Returns true if it's ok to use this pointer. + * Callers do not need to set the corrupt flag. + */ +static bool +xchk_btree_ptr_ok( + struct xchk_btree *bs, + int level, + union xfs_btree_ptr *ptr) +{ + bool res; + + /* A btree rooted in an inode has no block pointer to the root. */ + if ((bs->cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) && + level == bs->cur->bc_nlevels) + return true; + + /* Otherwise, check the pointers. */ + if (bs->cur->bc_flags & XFS_BTREE_LONG_PTRS) + res = xfs_btree_check_lptr(bs->cur, be64_to_cpu(ptr->l), level); + else + res = xfs_btree_check_sptr(bs->cur, be32_to_cpu(ptr->s), level); + if (!res) + xchk_btree_set_corrupt(bs->sc, bs->cur, level); + + return res; +} + +/* Check that a btree block's sibling matches what we expect. */ +STATIC int +xchk_btree_block_check_sibling( + struct xchk_btree *bs, + int level, + int direction, + union xfs_btree_ptr *sibling) +{ + struct xfs_btree_cur *cur = bs->cur; + struct xfs_btree_block *pblock; + struct xfs_buf *pbp; + struct xfs_btree_cur *ncur = NULL; + union xfs_btree_ptr *pp; + int success; + int error; + + error = xfs_btree_dup_cursor(cur, &ncur); + if (!xchk_btree_process_error(bs->sc, cur, level + 1, &error) || + !ncur) + return error; + + /* + * If the pointer is null, we shouldn't be able to move the upper + * level pointer anywhere. + */ + if (xfs_btree_ptr_is_null(cur, sibling)) { + if (direction > 0) + error = xfs_btree_increment(ncur, level + 1, &success); + else + error = xfs_btree_decrement(ncur, level + 1, &success); + if (error == 0 && success) + xchk_btree_set_corrupt(bs->sc, cur, level); + error = 0; + goto out; + } + + /* Increment upper level pointer. */ + if (direction > 0) + error = xfs_btree_increment(ncur, level + 1, &success); + else + error = xfs_btree_decrement(ncur, level + 1, &success); + if (!xchk_btree_process_error(bs->sc, cur, level + 1, &error)) + goto out; + if (!success) { + xchk_btree_set_corrupt(bs->sc, cur, level + 1); + goto out; + } + + /* Compare upper level pointer to sibling pointer. */ + pblock = xfs_btree_get_block(ncur, level + 1, &pbp); + pp = xfs_btree_ptr_addr(ncur, ncur->bc_levels[level + 1].ptr, pblock); + if (!xchk_btree_ptr_ok(bs, level + 1, pp)) + goto out; + if (pbp) + xchk_buffer_recheck(bs->sc, pbp); + + if (xfs_btree_diff_two_ptrs(cur, pp, sibling)) + xchk_btree_set_corrupt(bs->sc, cur, level); +out: + xfs_btree_del_cursor(ncur, XFS_BTREE_ERROR); + return error; +} + +/* Check the siblings of a btree block. */ +STATIC int +xchk_btree_block_check_siblings( + struct xchk_btree *bs, + struct xfs_btree_block *block) +{ + struct xfs_btree_cur *cur = bs->cur; + union xfs_btree_ptr leftsib; + union xfs_btree_ptr rightsib; + int level; + int error = 0; + + xfs_btree_get_sibling(cur, block, &leftsib, XFS_BB_LEFTSIB); + xfs_btree_get_sibling(cur, block, &rightsib, XFS_BB_RIGHTSIB); + level = xfs_btree_get_level(block); + + /* Root block should never have siblings. */ + if (level == cur->bc_nlevels - 1) { + if (!xfs_btree_ptr_is_null(cur, &leftsib) || + !xfs_btree_ptr_is_null(cur, &rightsib)) + xchk_btree_set_corrupt(bs->sc, cur, level); + goto out; + } + + /* + * Do the left & right sibling pointers match the adjacent + * parent level pointers?
+ * (These functions absorb error codes for us.) + */ + error = xchk_btree_block_check_sibling(bs, level, -1, &leftsib); + if (error) + return error; + error = xchk_btree_block_check_sibling(bs, level, 1, &rightsib); + if (error) + return error; +out: + return error; +} + +struct check_owner { + struct list_head list; + xfs_daddr_t daddr; + int level; +}; + +/* + * Make sure this btree block isn't in the free list and that there's + * an rmap record for it. + */ +STATIC int +xchk_btree_check_block_owner( + struct xchk_btree *bs, + int level, + xfs_daddr_t daddr) +{ + xfs_agnumber_t agno; + xfs_agblock_t agbno; + xfs_btnum_t btnum; + bool init_sa; + int error = 0; + + if (!bs->cur) + return 0; + + btnum = bs->cur->bc_btnum; + agno = xfs_daddr_to_agno(bs->cur->bc_mp, daddr); + agbno = xfs_daddr_to_agbno(bs->cur->bc_mp, daddr); + + init_sa = bs->cur->bc_flags & XFS_BTREE_LONG_PTRS; + if (init_sa) { + error = xchk_ag_init_existing(bs->sc, agno, &bs->sc->sa); + if (!xchk_btree_xref_process_error(bs->sc, bs->cur, + level, &error)) + goto out_free; + } + + xchk_xref_is_used_space(bs->sc, agbno, 1); + /* + * The bnobt scrubber aliases bs->cur to bs->sc->sa.bno_cur, so we + * have to nullify it (to shut down further block owner checks) if + * self-xref encounters problems. + */ + if (!bs->sc->sa.bno_cur && btnum == XFS_BTNUM_BNO) + bs->cur = NULL; + + xchk_xref_is_owned_by(bs->sc, agbno, 1, bs->oinfo); + if (!bs->sc->sa.rmap_cur && btnum == XFS_BTNUM_RMAP) + bs->cur = NULL; + +out_free: + if (init_sa) + xchk_ag_free(bs->sc, &bs->sc->sa); + + return error; +} + +/* Check the owner of a btree block. */ +STATIC int +xchk_btree_check_owner( + struct xchk_btree *bs, + int level, + struct xfs_buf *bp) +{ + struct xfs_btree_cur *cur = bs->cur; + struct check_owner *co; + + /* + * In theory, xfs_btree_get_block should only give us a null buffer + * pointer for the root of a root-in-inode btree type, but we need + * to check defensively here in case the cursor state is also screwed + * up. + */ + if (bp == NULL) { + if (!(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE)) + xchk_btree_set_corrupt(bs->sc, bs->cur, level); + return 0; + } + + /* + * We want to cross-reference each btree block with the bnobt + * and the rmapbt. We cannot cross-reference the bnobt or + * rmapbt while scanning the bnobt or rmapbt, respectively, + * because we cannot alter the cursor and we'd prefer not to + * duplicate cursors. Therefore, save the buffer daddr for + * later scanning. + */ + if (cur->bc_btnum == XFS_BTNUM_BNO || cur->bc_btnum == XFS_BTNUM_RMAP) { + co = kmem_alloc(sizeof(struct check_owner), + KM_MAYFAIL); + if (!co) + return -ENOMEM; + co->level = level; + co->daddr = xfs_buf_daddr(bp); + list_add_tail(&co->list, &bs->to_check); + return 0; + } + + return xchk_btree_check_block_owner(bs, level, xfs_buf_daddr(bp)); +} + +/* Decide if we want to check minrecs of a btree block in the inode root. */ +static inline bool +xchk_btree_check_iroot_minrecs( + struct xchk_btree *bs) +{ + /* + * xfs_bmap_add_attrfork_btree had an implementation bug wherein it + * would miscalculate the space required for the data fork bmbt root + * when adding an attr fork, and promote the iroot contents to an + * external block unnecessarily. This went unnoticed for many years + * until scrub found filesystems in this state.
Inode rooted btrees are + * not supposed to have immediate child blocks that are small enough + * that the contents could fit in the inode root, but we can't fail + * existing filesystems, so instead we disable the check for data fork + * bmap btrees when there's an attr fork. + */ + if (bs->cur->bc_btnum == XFS_BTNUM_BMAP && + bs->cur->bc_ino.whichfork == XFS_DATA_FORK && + xfs_inode_has_attr_fork(bs->sc->ip)) + return false; + + return true; +} + +/* + * Check that this btree block has at least minrecs records or is one of the + * special blocks that don't require that. + */ +STATIC void +xchk_btree_check_minrecs( + struct xchk_btree *bs, + int level, + struct xfs_btree_block *block) +{ + struct xfs_btree_cur *cur = bs->cur; + unsigned int root_level = cur->bc_nlevels - 1; + unsigned int numrecs = be16_to_cpu(block->bb_numrecs); + + /* More records than minrecs means the block is ok. */ + if (numrecs >= cur->bc_ops->get_minrecs(cur, level)) + return; + + /* + * For btrees rooted in the inode, it's possible that the root block + * contents spilled into a regular ondisk block because there wasn't + * enough space in the inode root. The number of records in that + * child block might be less than the standard minrecs, but that's ok + * provided that there's only one direct child of the root. + */ + if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) && + level == cur->bc_nlevels - 2) { + struct xfs_btree_block *root_block; + struct xfs_buf *root_bp; + int root_maxrecs; + + root_block = xfs_btree_get_block(cur, root_level, &root_bp); + root_maxrecs = cur->bc_ops->get_dmaxrecs(cur, root_level); + if (xchk_btree_check_iroot_minrecs(bs) && + (be16_to_cpu(root_block->bb_numrecs) != 1 || + numrecs <= root_maxrecs)) + xchk_btree_set_corrupt(bs->sc, cur, level); + return; + } + + /* + * Otherwise, only the root level is allowed to have fewer than minrecs + * records or keyptrs. + */ + if (level < root_level) + xchk_btree_set_corrupt(bs->sc, cur, level); +} + +/* + * Grab and scrub a btree block given a btree pointer. Returns block + * and buffer pointers (if applicable) if they're ok to use. + */ +STATIC int +xchk_btree_get_block( + struct xchk_btree *bs, + int level, + union xfs_btree_ptr *pp, + struct xfs_btree_block **pblock, + struct xfs_buf **pbp) +{ + xfs_failaddr_t failed_at; + int error; + + *pblock = NULL; + *pbp = NULL; + + error = xfs_btree_lookup_get_block(bs->cur, level, pp, pblock); + if (!xchk_btree_process_error(bs->sc, bs->cur, level, &error) || + !*pblock) + return error; + + xfs_btree_get_block(bs->cur, level, pbp); + if (bs->cur->bc_flags & XFS_BTREE_LONG_PTRS) + failed_at = __xfs_btree_check_lblock(bs->cur, *pblock, + level, *pbp); + else + failed_at = __xfs_btree_check_sblock(bs->cur, *pblock, + level, *pbp); + if (failed_at) { + xchk_btree_set_corrupt(bs->sc, bs->cur, level); + return 0; + } + if (*pbp) + xchk_buffer_recheck(bs->sc, *pbp); + + xchk_btree_check_minrecs(bs, level, *pblock); + + /* + * Check the block's owner; this function absorbs error codes + * for us. + */ + error = xchk_btree_check_owner(bs, level, *pbp); + if (error) + return error; + + /* + * Check the block's siblings; this function absorbs error codes + * for us. + */ + return xchk_btree_block_check_siblings(bs, *pblock); +} + +/* + * Check that the low and high keys of this block match the keys stored + * in the parent block. 
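+ *
+ * As a concrete illustration (the numbers here are invented): if a
+ * level-1 block holds records whose keys run from 5 to 17, the keyptr
+ * for that block in its level-2 parent must carry the low key 5, and
+ * in an overlapping btree such as the rmapbt the parent's high key for
+ * that block must likewise be exactly 17.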
+ */ +STATIC void +xchk_btree_block_keys( + struct xchk_btree *bs, + int level, + struct xfs_btree_block *block) +{ + union xfs_btree_key block_keys; + struct xfs_btree_cur *cur = bs->cur; + union xfs_btree_key *high_bk; + union xfs_btree_key *parent_keys; + union xfs_btree_key *high_pk; + struct xfs_btree_block *parent_block; + struct xfs_buf *bp; + + if (level >= cur->bc_nlevels - 1) + return; + + /* Calculate the keys for this block. */ + xfs_btree_get_keys(cur, block, &block_keys); + + /* Obtain the parent's copy of the keys for this block. */ + parent_block = xfs_btree_get_block(cur, level + 1, &bp); + parent_keys = xfs_btree_key_addr(cur, cur->bc_levels[level + 1].ptr, + parent_block); + + if (cur->bc_ops->diff_two_keys(cur, &block_keys, parent_keys) != 0) + xchk_btree_set_corrupt(bs->sc, cur, 1); + + if (!(cur->bc_flags & XFS_BTREE_OVERLAPPING)) + return; + + /* Get high keys */ + high_bk = xfs_btree_high_key_from_key(cur, &block_keys); + high_pk = xfs_btree_high_key_addr(cur, cur->bc_levels[level + 1].ptr, + parent_block); + + if (cur->bc_ops->diff_two_keys(cur, high_bk, high_pk) != 0) + xchk_btree_set_corrupt(bs->sc, cur, 1); +} + +/* + * Visit all nodes and leaves of a btree. Check that all pointers and + * records are in order, that the keys reflect the records, and use a callback + * so that the caller can verify individual records. + */ +int +xchk_btree( + struct xfs_scrub *sc, + struct xfs_btree_cur *cur, + xchk_btree_rec_fn scrub_fn, + const struct xfs_owner_info *oinfo, + void *private) +{ + union xfs_btree_ptr ptr; + struct xchk_btree *bs; + union xfs_btree_ptr *pp; + union xfs_btree_rec *recp; + struct xfs_btree_block *block; + struct xfs_buf *bp; + struct check_owner *co; + struct check_owner *n; + size_t cur_sz; + int level; + int error = 0; + + /* + * Allocate the btree scrub context from the heap, because this + * structure can get rather large. Don't let a caller feed us a + * totally absurd size. + */ + cur_sz = xchk_btree_sizeof(cur->bc_nlevels); + if (cur_sz > PAGE_SIZE) { + xchk_btree_set_corrupt(sc, cur, 0); + return 0; + } + bs = kmem_zalloc(cur_sz, KM_NOFS | KM_MAYFAIL); + if (!bs) + return -ENOMEM; + bs->cur = cur; + bs->scrub_rec = scrub_fn; + bs->oinfo = oinfo; + bs->private = private; + bs->sc = sc; + + /* Initialize scrub state */ + INIT_LIST_HEAD(&bs->to_check); + + /* + * Load the root of the btree. The helper function absorbs + * error codes for us. + */ + level = cur->bc_nlevels - 1; + cur->bc_ops->init_ptr_from_cur(cur, &ptr); + if (!xchk_btree_ptr_ok(bs, cur->bc_nlevels, &ptr)) + goto out; + error = xchk_btree_get_block(bs, level, &ptr, &block, &bp); + if (error || !block) + goto out; + + cur->bc_levels[level].ptr = 1; + + while (level < cur->bc_nlevels) { + block = xfs_btree_get_block(cur, level, &bp); + + if (level == 0) { + /* End of leaf, pop back towards the root. */ + if (cur->bc_levels[level].ptr > + be16_to_cpu(block->bb_numrecs)) { + xchk_btree_block_keys(bs, level, block); + if (level < cur->bc_nlevels - 1) + cur->bc_levels[level + 1].ptr++; + level++; + continue; + } + + /* Records in order for scrub? */ + xchk_btree_rec(bs); + + /* Call out to the record checker. */ + recp = xfs_btree_rec_addr(cur, cur->bc_levels[0].ptr, + block); + error = bs->scrub_rec(bs, recp); + if (error) + break; + if (xchk_should_terminate(sc, &error) || + (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)) + break; + + cur->bc_levels[level].ptr++; + continue; + } + + /* End of node, pop back towards the root. 
*/ + if (cur->bc_levels[level].ptr > + be16_to_cpu(block->bb_numrecs)) { + xchk_btree_block_keys(bs, level, block); + if (level < cur->bc_nlevels - 1) + cur->bc_levels[level + 1].ptr++; + level++; + continue; + } + + /* Keys in order for scrub? */ + xchk_btree_key(bs, level); + + /* Drill another level deeper. */ + pp = xfs_btree_ptr_addr(cur, cur->bc_levels[level].ptr, block); + if (!xchk_btree_ptr_ok(bs, level, pp)) { + cur->bc_levels[level].ptr++; + continue; + } + level--; + error = xchk_btree_get_block(bs, level, pp, &block, &bp); + if (error || !block) + goto out; + + cur->bc_levels[level].ptr = 1; + } + +out: + /* Process deferred owner checks on btree blocks. */ + list_for_each_entry_safe(co, n, &bs->to_check, list) { + if (!error && bs->cur) + error = xchk_btree_check_block_owner(bs, co->level, + co->daddr); + list_del(&co->list); + kmem_free(co); + } + kmem_free(bs); + + return error; +} diff --git a/fs/xfs/scrub/btree.h b/fs/xfs/scrub/btree.h new file mode 100644 index 000000000..da61a53a0 --- /dev/null +++ b/fs/xfs/scrub/btree.h @@ -0,0 +1,62 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2017 Oracle. All Rights Reserved. + * Author: Darrick J. Wong + */ +#ifndef __XFS_SCRUB_BTREE_H__ +#define __XFS_SCRUB_BTREE_H__ + +/* btree scrub */ + +/* Check for btree operation errors. */ +bool xchk_btree_process_error(struct xfs_scrub *sc, + struct xfs_btree_cur *cur, int level, int *error); + +/* Check for btree xref operation errors. */ +bool xchk_btree_xref_process_error(struct xfs_scrub *sc, + struct xfs_btree_cur *cur, int level, int *error); + +/* Check for btree corruption. */ +void xchk_btree_set_corrupt(struct xfs_scrub *sc, + struct xfs_btree_cur *cur, int level); + +/* Check for btree xref discrepancies. */ +void xchk_btree_xref_set_corrupt(struct xfs_scrub *sc, + struct xfs_btree_cur *cur, int level); + +struct xchk_btree; +typedef int (*xchk_btree_rec_fn)( + struct xchk_btree *bs, + const union xfs_btree_rec *rec); + +struct xchk_btree { + /* caller-provided scrub state */ + struct xfs_scrub *sc; + struct xfs_btree_cur *cur; + xchk_btree_rec_fn scrub_rec; + const struct xfs_owner_info *oinfo; + void *private; + + /* internal scrub state */ + union xfs_btree_rec lastrec; + struct list_head to_check; + + /* this element must come last! */ + union xfs_btree_key lastkey[]; +}; + +/* + * Calculate the size of a xchk_btree structure. There are nlevels-1 slots for + * keys because we track leaf records separately in lastrec. + */ +static inline size_t +xchk_btree_sizeof(unsigned int nlevels) +{ + return struct_size((struct xchk_btree *)NULL, lastkey, nlevels - 1); +} + +int xchk_btree(struct xfs_scrub *sc, struct xfs_btree_cur *cur, + xchk_btree_rec_fn scrub_fn, const struct xfs_owner_info *oinfo, + void *private); + +#endif /* __XFS_SCRUB_BTREE_H__ */ diff --git a/fs/xfs/scrub/common.c b/fs/xfs/scrub/common.c new file mode 100644 index 000000000..e71449658 --- /dev/null +++ b/fs/xfs/scrub/common.c @@ -0,0 +1,867 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2017 Oracle. All Rights Reserved. + * Author: Darrick J. 
Wong + */ +#include "xfs.h" +#include "xfs_fs.h" +#include "xfs_shared.h" +#include "xfs_format.h" +#include "xfs_trans_resv.h" +#include "xfs_mount.h" +#include "xfs_btree.h" +#include "xfs_log_format.h" +#include "xfs_trans.h" +#include "xfs_inode.h" +#include "xfs_icache.h" +#include "xfs_alloc.h" +#include "xfs_alloc_btree.h" +#include "xfs_ialloc.h" +#include "xfs_ialloc_btree.h" +#include "xfs_refcount_btree.h" +#include "xfs_rmap.h" +#include "xfs_rmap_btree.h" +#include "xfs_log.h" +#include "xfs_trans_priv.h" +#include "xfs_da_format.h" +#include "xfs_da_btree.h" +#include "xfs_attr.h" +#include "xfs_reflink.h" +#include "xfs_ag.h" +#include "scrub/scrub.h" +#include "scrub/common.h" +#include "scrub/trace.h" +#include "scrub/repair.h" +#include "scrub/health.h" + +/* Common code for the metadata scrubbers. */ + +/* + * Handling operational errors. + * + * The *_process_error() family of functions are used to process error return + * codes from functions called as part of a scrub operation. + * + * If there's no error, we return true to tell the caller that it's ok + * to move on to the next check in its list. + * + * For non-verifier errors (e.g. ENOMEM) we return false to tell the + * caller that something bad happened, and we preserve *error so that + * the caller can return the *error up the stack to userspace. + * + * Verifier errors (EFSBADCRC/EFSCORRUPTED) are recorded by setting + * OFLAG_CORRUPT in sm_flags and the *error is cleared. In other words, + * we track verifier errors (and failed scrub checks) via OFLAG_CORRUPT, + * not via return codes. We return false to tell the caller that + * something bad happened. Since the error has been cleared, the caller + * will (presumably) return that zero and scrubbing will move on to + * whatever's next. + * + * ftrace can be used to record the precise metadata location and the + * approximate code location of the failed operation. + */ + +/* Check for operational errors. */ +static bool +__xchk_process_error( + struct xfs_scrub *sc, + xfs_agnumber_t agno, + xfs_agblock_t bno, + int *error, + __u32 errflag, + void *ret_ip) +{ + switch (*error) { + case 0: + return true; + case -EDEADLOCK: + /* Used to restart an op with deadlock avoidance. */ + trace_xchk_deadlock_retry( + sc->ip ? sc->ip : XFS_I(file_inode(sc->file)), + sc->sm, *error); + break; + case -EFSBADCRC: + case -EFSCORRUPTED: + /* Note the badness but don't abort. */ + sc->sm->sm_flags |= errflag; + *error = 0; + fallthrough; + default: + trace_xchk_op_error(sc, agno, bno, *error, + ret_ip); + break; + } + return false; +} + +bool +xchk_process_error( + struct xfs_scrub *sc, + xfs_agnumber_t agno, + xfs_agblock_t bno, + int *error) +{ + return __xchk_process_error(sc, agno, bno, error, + XFS_SCRUB_OFLAG_CORRUPT, __return_address); +} + +bool +xchk_xref_process_error( + struct xfs_scrub *sc, + xfs_agnumber_t agno, + xfs_agblock_t bno, + int *error) +{ + return __xchk_process_error(sc, agno, bno, error, + XFS_SCRUB_OFLAG_XFAIL, __return_address); +} + +/* Check for operational errors for a file offset. */ +static bool +__xchk_fblock_process_error( + struct xfs_scrub *sc, + int whichfork, + xfs_fileoff_t offset, + int *error, + __u32 errflag, + void *ret_ip) +{ + switch (*error) { + case 0: + return true; + case -EDEADLOCK: + /* Used to restart an op with deadlock avoidance. */ + trace_xchk_deadlock_retry(sc->ip, sc->sm, *error); + break; + case -EFSBADCRC: + case -EFSCORRUPTED: + /* Note the badness but don't abort. 
*/ + sc->sm->sm_flags |= errflag; + *error = 0; + fallthrough; + default: + trace_xchk_file_op_error(sc, whichfork, offset, *error, + ret_ip); + break; + } + return false; +} + +bool +xchk_fblock_process_error( + struct xfs_scrub *sc, + int whichfork, + xfs_fileoff_t offset, + int *error) +{ + return __xchk_fblock_process_error(sc, whichfork, offset, error, + XFS_SCRUB_OFLAG_CORRUPT, __return_address); +} + +bool +xchk_fblock_xref_process_error( + struct xfs_scrub *sc, + int whichfork, + xfs_fileoff_t offset, + int *error) +{ + return __xchk_fblock_process_error(sc, whichfork, offset, error, + XFS_SCRUB_OFLAG_XFAIL, __return_address); +} + +/* + * Handling scrub corruption/optimization/warning checks. + * + * The *_set_{corrupt,preen,warning}() family of functions are used to + * record the presence of metadata that is incorrect (corrupt), could be + * optimized somehow (preen), or should be flagged for administrative + * review but is not incorrect (warn). + * + * ftrace can be used to record the precise metadata location and + * approximate code location of the failed check. + */ + +/* Record a block which could be optimized. */ +void +xchk_block_set_preen( + struct xfs_scrub *sc, + struct xfs_buf *bp) +{ + sc->sm->sm_flags |= XFS_SCRUB_OFLAG_PREEN; + trace_xchk_block_preen(sc, xfs_buf_daddr(bp), __return_address); +} + +/* + * Record an inode which could be optimized. The trace data will + * include the block given by bp if bp is given; otherwise it will use + * the block location of the inode record itself. + */ +void +xchk_ino_set_preen( + struct xfs_scrub *sc, + xfs_ino_t ino) +{ + sc->sm->sm_flags |= XFS_SCRUB_OFLAG_PREEN; + trace_xchk_ino_preen(sc, ino, __return_address); +} + +/* Record something being wrong with the filesystem primary superblock. */ +void +xchk_set_corrupt( + struct xfs_scrub *sc) +{ + sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT; + trace_xchk_fs_error(sc, 0, __return_address); +} + +/* Record a corrupt block. */ +void +xchk_block_set_corrupt( + struct xfs_scrub *sc, + struct xfs_buf *bp) +{ + sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT; + trace_xchk_block_error(sc, xfs_buf_daddr(bp), __return_address); +} + +/* Record a corruption while cross-referencing. */ +void +xchk_block_xref_set_corrupt( + struct xfs_scrub *sc, + struct xfs_buf *bp) +{ + sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XCORRUPT; + trace_xchk_block_error(sc, xfs_buf_daddr(bp), __return_address); +} + +/* + * Record a corrupt inode. The trace data will include the block given + * by bp if bp is given; otherwise it will use the block location of the + * inode record itself. + */ +void +xchk_ino_set_corrupt( + struct xfs_scrub *sc, + xfs_ino_t ino) +{ + sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT; + trace_xchk_ino_error(sc, ino, __return_address); +} + +/* Record a corruption while cross-referencing with an inode. */ +void +xchk_ino_xref_set_corrupt( + struct xfs_scrub *sc, + xfs_ino_t ino) +{ + sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XCORRUPT; + trace_xchk_ino_error(sc, ino, __return_address); +} + +/* Record corruption in a block indexed by a file fork. */ +void +xchk_fblock_set_corrupt( + struct xfs_scrub *sc, + int whichfork, + xfs_fileoff_t offset) +{ + sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT; + trace_xchk_fblock_error(sc, whichfork, offset, __return_address); +} + +/* Record a corruption while cross-referencing a fork block. 
 */
+void
+xchk_fblock_xref_set_corrupt(
+ struct xfs_scrub *sc,
+ int whichfork,
+ xfs_fileoff_t offset)
+{
+ sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XCORRUPT;
+ trace_xchk_fblock_error(sc, whichfork, offset, __return_address);
+}
+
+/*
+ * Warn about inodes that need administrative review but are not
+ * incorrect.
+ */
+void
+xchk_ino_set_warning(
+ struct xfs_scrub *sc,
+ xfs_ino_t ino)
+{
+ sc->sm->sm_flags |= XFS_SCRUB_OFLAG_WARNING;
+ trace_xchk_ino_warning(sc, ino, __return_address);
+}
+
+/* Warn about a block indexed by a file fork that needs review. */
+void
+xchk_fblock_set_warning(
+ struct xfs_scrub *sc,
+ int whichfork,
+ xfs_fileoff_t offset)
+{
+ sc->sm->sm_flags |= XFS_SCRUB_OFLAG_WARNING;
+ trace_xchk_fblock_warning(sc, whichfork, offset, __return_address);
+}
+
+/* Signal an incomplete scrub. */
+void
+xchk_set_incomplete(
+ struct xfs_scrub *sc)
+{
+ sc->sm->sm_flags |= XFS_SCRUB_OFLAG_INCOMPLETE;
+ trace_xchk_incomplete(sc, __return_address);
+}
+
+/*
+ * rmap scrubbing -- compute the number of blocks with a given owner,
+ * at least according to the reverse mapping data.
+ */
+
+struct xchk_rmap_ownedby_info {
+ const struct xfs_owner_info *oinfo;
+ xfs_filblks_t *blocks;
+};
+
+STATIC int
+xchk_count_rmap_ownedby_irec(
+ struct xfs_btree_cur *cur,
+ const struct xfs_rmap_irec *rec,
+ void *priv)
+{
+ struct xchk_rmap_ownedby_info *sroi = priv;
+ bool irec_attr;
+ bool oinfo_attr;
+
+ irec_attr = rec->rm_flags & XFS_RMAP_ATTR_FORK;
+ oinfo_attr = sroi->oinfo->oi_flags & XFS_OWNER_INFO_ATTR_FORK;
+
+ if (rec->rm_owner != sroi->oinfo->oi_owner)
+ return 0;
+
+ if (XFS_RMAP_NON_INODE_OWNER(rec->rm_owner) || irec_attr == oinfo_attr)
+ (*sroi->blocks) += rec->rm_blockcount;
+
+ return 0;
+}
+
+/*
+ * Calculate the number of blocks the rmap thinks are owned by something.
+ * The caller should pass us an rmapbt cursor.
+ */
+int
+xchk_count_rmap_ownedby_ag(
+ struct xfs_scrub *sc,
+ struct xfs_btree_cur *cur,
+ const struct xfs_owner_info *oinfo,
+ xfs_filblks_t *blocks)
+{
+ struct xchk_rmap_ownedby_info sroi = {
+ .oinfo = oinfo,
+ .blocks = blocks,
+ };
+
+ *blocks = 0;
+ return xfs_rmap_query_all(cur, xchk_count_rmap_ownedby_irec,
+ &sroi);
+}
+
+/*
+ * AG scrubbing
+ *
+ * These helpers facilitate locking an allocation group's header
+ * buffers, setting up cursors for all btrees that are present, and
+ * cleaning everything up once we're through.
+ */
+
+/* Decide if we want to return an AG header read failure. */
+static inline bool
+want_ag_read_header_failure(
+ struct xfs_scrub *sc,
+ unsigned int type)
+{
+ /* Return all AG header read failures when scanning btrees. */
+ if (sc->sm->sm_type != XFS_SCRUB_TYPE_AGF &&
+ sc->sm->sm_type != XFS_SCRUB_TYPE_AGFL &&
+ sc->sm->sm_type != XFS_SCRUB_TYPE_AGI)
+ return true;
+ /*
+ * If we're scanning a given type of AG header, we only want to
+ * see read failures from that specific header. We'd like the
+ * other headers to cross-check them, but this isn't required.
+ */
+ if (sc->sm->sm_type == type)
+ return true;
+ return false;
+}
+
+/*
+ * Grab the perag structure and all the headers for an AG.
+ *
+ * The headers should be released by xchk_ag_free, but as a fail safe we attach
+ * all the buffers we grab to the scrub transaction so they'll all be freed
+ * when we cancel it. Returns ENOENT if we can't grab the perag structure.
 */
+int
+xchk_ag_read_headers(
+ struct xfs_scrub *sc,
+ xfs_agnumber_t agno,
+ struct xchk_ag *sa)
+{
+ struct xfs_mount *mp = sc->mp;
+ int error;
+
+ ASSERT(!sa->pag);
+ sa->pag = xfs_perag_get(mp, agno);
+ if (!sa->pag)
+ return -ENOENT;
+
+ error = xfs_ialloc_read_agi(sa->pag, sc->tp, &sa->agi_bp);
+ if (error && want_ag_read_header_failure(sc, XFS_SCRUB_TYPE_AGI))
+ return error;
+
+ error = xfs_alloc_read_agf(sa->pag, sc->tp, 0, &sa->agf_bp);
+ if (error && want_ag_read_header_failure(sc, XFS_SCRUB_TYPE_AGF))
+ return error;
+
+ error = xfs_alloc_read_agfl(sa->pag, sc->tp, &sa->agfl_bp);
+ if (error && want_ag_read_header_failure(sc, XFS_SCRUB_TYPE_AGFL))
+ return error;
+
+ return 0;
+}
+
+/* Release all the AG btree cursors. */
+void
+xchk_ag_btcur_free(
+ struct xchk_ag *sa)
+{
+ if (sa->refc_cur)
+ xfs_btree_del_cursor(sa->refc_cur, XFS_BTREE_ERROR);
+ if (sa->rmap_cur)
+ xfs_btree_del_cursor(sa->rmap_cur, XFS_BTREE_ERROR);
+ if (sa->fino_cur)
+ xfs_btree_del_cursor(sa->fino_cur, XFS_BTREE_ERROR);
+ if (sa->ino_cur)
+ xfs_btree_del_cursor(sa->ino_cur, XFS_BTREE_ERROR);
+ if (sa->cnt_cur)
+ xfs_btree_del_cursor(sa->cnt_cur, XFS_BTREE_ERROR);
+ if (sa->bno_cur)
+ xfs_btree_del_cursor(sa->bno_cur, XFS_BTREE_ERROR);
+
+ sa->refc_cur = NULL;
+ sa->rmap_cur = NULL;
+ sa->fino_cur = NULL;
+ sa->ino_cur = NULL;
+ sa->bno_cur = NULL;
+ sa->cnt_cur = NULL;
+}
+
+/* Initialize all the btree cursors for an AG. */
+void
+xchk_ag_btcur_init(
+ struct xfs_scrub *sc,
+ struct xchk_ag *sa)
+{
+ struct xfs_mount *mp = sc->mp;
+
+ if (sa->agf_bp &&
+ xchk_ag_btree_healthy_enough(sc, sa->pag, XFS_BTNUM_BNO)) {
+ /* Set up a bnobt cursor for cross-referencing. */
+ sa->bno_cur = xfs_allocbt_init_cursor(mp, sc->tp, sa->agf_bp,
+ sa->pag, XFS_BTNUM_BNO);
+ }
+
+ if (sa->agf_bp &&
+ xchk_ag_btree_healthy_enough(sc, sa->pag, XFS_BTNUM_CNT)) {
+ /* Set up a cntbt cursor for cross-referencing. */
+ sa->cnt_cur = xfs_allocbt_init_cursor(mp, sc->tp, sa->agf_bp,
+ sa->pag, XFS_BTNUM_CNT);
+ }
+
+ /* Set up an inobt cursor for cross-referencing. */
+ if (sa->agi_bp &&
+ xchk_ag_btree_healthy_enough(sc, sa->pag, XFS_BTNUM_INO)) {
+ sa->ino_cur = xfs_inobt_init_cursor(mp, sc->tp, sa->agi_bp,
+ sa->pag, XFS_BTNUM_INO);
+ }
+
+ /* Set up a finobt cursor for cross-referencing. */
+ if (sa->agi_bp && xfs_has_finobt(mp) &&
+ xchk_ag_btree_healthy_enough(sc, sa->pag, XFS_BTNUM_FINO)) {
+ sa->fino_cur = xfs_inobt_init_cursor(mp, sc->tp, sa->agi_bp,
+ sa->pag, XFS_BTNUM_FINO);
+ }
+
+ /* Set up an rmapbt cursor for cross-referencing. */
+ if (sa->agf_bp && xfs_has_rmapbt(mp) &&
+ xchk_ag_btree_healthy_enough(sc, sa->pag, XFS_BTNUM_RMAP)) {
+ sa->rmap_cur = xfs_rmapbt_init_cursor(mp, sc->tp, sa->agf_bp,
+ sa->pag);
+ }
+
+ /* Set up a refcountbt cursor for cross-referencing. */
+ if (sa->agf_bp && xfs_has_reflink(mp) &&
+ xchk_ag_btree_healthy_enough(sc, sa->pag, XFS_BTNUM_REFC)) {
+ sa->refc_cur = xfs_refcountbt_init_cursor(mp, sc->tp,
+ sa->agf_bp, sa->pag);
+ }
+}
+
+/* Release the AG header context and btree cursors.
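+ *
+ * A rough sketch of the intended pairing with xchk_ag_init() (the
+ * caller shown here is hypothetical, for illustration only):
+ *
+ *     error = xchk_ag_init(sc, agno, &sc->sa);
+ *     if (error)
+ *             return error;
+ *     (...cross-reference against sc->sa.bno_cur, sc->sa.rmap_cur...)
+ *     xchk_ag_free(sc, &sc->sa);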
 */
+void
+xchk_ag_free(
+ struct xfs_scrub *sc,
+ struct xchk_ag *sa)
+{
+ xchk_ag_btcur_free(sa);
+ if (sa->agfl_bp) {
+ xfs_trans_brelse(sc->tp, sa->agfl_bp);
+ sa->agfl_bp = NULL;
+ }
+ if (sa->agf_bp) {
+ xfs_trans_brelse(sc->tp, sa->agf_bp);
+ sa->agf_bp = NULL;
+ }
+ if (sa->agi_bp) {
+ xfs_trans_brelse(sc->tp, sa->agi_bp);
+ sa->agi_bp = NULL;
+ }
+ if (sa->pag) {
+ xfs_perag_put(sa->pag);
+ sa->pag = NULL;
+ }
+}
+
+/*
+ * For scrub, grab the perag structure, the AGI, and the AGF headers, in that
+ * order. Locking order requires us to get the AGI before the AGF. We use the
+ * transaction to avoid deadlocking on crosslinked metadata buffers; either the
+ * caller passes one in (bmap scrub) or we have to create a transaction
+ * ourselves. Returns ENOENT if the perag struct cannot be grabbed.
+ */
+int
+xchk_ag_init(
+ struct xfs_scrub *sc,
+ xfs_agnumber_t agno,
+ struct xchk_ag *sa)
+{
+ int error;
+
+ error = xchk_ag_read_headers(sc, agno, sa);
+ if (error)
+ return error;
+
+ xchk_ag_btcur_init(sc, sa);
+ return 0;
+}
+
+/* Per-scrubber setup functions */
+
+/*
+ * Grab an empty transaction so that we can re-grab locked buffers if
+ * one of our btrees turns out to be cyclic.
+ *
+ * If we're going to repair something, we need to ask for the largest possible
+ * log reservation so that we can handle the worst case scenario for metadata
+ * updates while rebuilding a metadata item. We also need to reserve as many
+ * blocks in the head transaction as we think we're going to need to rebuild
+ * the metadata object.
+ */
+int
+xchk_trans_alloc(
+ struct xfs_scrub *sc,
+ uint resblks)
+{
+ if (sc->sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR)
+ return xfs_trans_alloc(sc->mp, &M_RES(sc->mp)->tr_itruncate,
+ resblks, 0, 0, &sc->tp);
+
+ return xfs_trans_alloc_empty(sc->mp, &sc->tp);
+}
+
+/* Set us up with a transaction and an empty context. */
+int
+xchk_setup_fs(
+ struct xfs_scrub *sc)
+{
+ uint resblks;
+
+ resblks = xrep_calc_ag_resblks(sc);
+ return xchk_trans_alloc(sc, resblks);
+}
+
+/* Set us up with AG headers and btree cursors. */
+int
+xchk_setup_ag_btree(
+ struct xfs_scrub *sc,
+ bool force_log)
+{
+ struct xfs_mount *mp = sc->mp;
+ int error;
+
+ /*
+ * If the caller asks us to checkpoint the log, do so. This
+ * expensive operation should be performed infrequently and only
+ * as a last resort. Any caller that sets force_log should
+ * document why they need to do so.
+ */
+ if (force_log) {
+ error = xchk_checkpoint_log(mp);
+ if (error)
+ return error;
+ }
+
+ error = xchk_setup_fs(sc);
+ if (error)
+ return error;
+
+ return xchk_ag_init(sc, sc->sm->sm_agno, &sc->sa);
+}
+
+/* Push everything out of the log onto disk. */
+int
+xchk_checkpoint_log(
+ struct xfs_mount *mp)
+{
+ int error;
+
+ error = xfs_log_force(mp, XFS_LOG_SYNC);
+ if (error)
+ return error;
+ xfs_ail_push_all_sync(mp->m_ail);
+ return 0;
+}
+
+/*
+ * Given an inode and the scrub control structure, grab either the
+ * inode referenced in the control structure or the inode passed in.
+ * The inode is not locked.
+ */
+int
+xchk_get_inode(
+ struct xfs_scrub *sc)
+{
+ struct xfs_imap imap;
+ struct xfs_mount *mp = sc->mp;
+ struct xfs_inode *ip_in = XFS_I(file_inode(sc->file));
+ struct xfs_inode *ip = NULL;
+ int error;
+
+ /* We want to scan the inode we already had opened. */
+ if (sc->sm->sm_ino == 0 || sc->sm->sm_ino == ip_in->i_ino) {
+ sc->ip = ip_in;
+ return 0;
+ }
+
+ /* Look up the inode, see if the generation number matches.
*/ + if (xfs_internal_inum(mp, sc->sm->sm_ino)) + return -ENOENT; + error = xfs_iget(mp, NULL, sc->sm->sm_ino, + XFS_IGET_UNTRUSTED | XFS_IGET_DONTCACHE, 0, &ip); + switch (error) { + case -ENOENT: + /* Inode doesn't exist, just bail out. */ + return error; + case 0: + /* Got an inode, continue. */ + break; + case -EINVAL: + /* + * -EINVAL with IGET_UNTRUSTED could mean one of several + * things: userspace gave us an inode number that doesn't + * correspond to fs space, or doesn't have an inobt entry; + * or it could simply mean that the inode buffer failed the + * read verifiers. + * + * Try just the inode mapping lookup -- if it succeeds, then + * the inode buffer verifier failed and something needs fixing. + * Otherwise, we really couldn't find it so tell userspace + * that it no longer exists. + */ + error = xfs_imap(sc->mp, sc->tp, sc->sm->sm_ino, &imap, + XFS_IGET_UNTRUSTED | XFS_IGET_DONTCACHE); + if (error) + return -ENOENT; + error = -EFSCORRUPTED; + fallthrough; + default: + trace_xchk_op_error(sc, + XFS_INO_TO_AGNO(mp, sc->sm->sm_ino), + XFS_INO_TO_AGBNO(mp, sc->sm->sm_ino), + error, __return_address); + return error; + } + if (VFS_I(ip)->i_generation != sc->sm->sm_gen) { + xfs_irele(ip); + return -ENOENT; + } + + sc->ip = ip; + return 0; +} + +/* Set us up to scrub a file's contents. */ +int +xchk_setup_inode_contents( + struct xfs_scrub *sc, + unsigned int resblks) +{ + int error; + + error = xchk_get_inode(sc); + if (error) + return error; + + /* Got the inode, lock it and we're ready to go. */ + sc->ilock_flags = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL; + xfs_ilock(sc->ip, sc->ilock_flags); + error = xchk_trans_alloc(sc, resblks); + if (error) + goto out; + sc->ilock_flags |= XFS_ILOCK_EXCL; + xfs_ilock(sc->ip, XFS_ILOCK_EXCL); + +out: + /* scrub teardown will unlock and release the inode for us */ + return error; +} + +/* + * Predicate that decides if we need to evaluate the cross-reference check. + * If there was an error accessing the cross-reference btree, just delete + * the cursor and skip the check. + */ +bool +xchk_should_check_xref( + struct xfs_scrub *sc, + int *error, + struct xfs_btree_cur **curpp) +{ + /* No point in xref if we already know we're corrupt. */ + if (xchk_skip_xref(sc->sm)) + return false; + + if (*error == 0) + return true; + + if (curpp) { + /* If we've already given up on xref, just bail out. */ + if (!*curpp) + return false; + + /* xref error, delete cursor and bail out. */ + xfs_btree_del_cursor(*curpp, XFS_BTREE_ERROR); + *curpp = NULL; + } + + sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XFAIL; + trace_xchk_xref_error(sc, *error, __return_address); + + /* + * Errors encountered during cross-referencing with another + * data structure should not cause this scrubber to abort. + */ + *error = 0; + return false; +} + +/* Run the structure verifiers on in-memory buffers to detect bad memory. */ +void +xchk_buffer_recheck( + struct xfs_scrub *sc, + struct xfs_buf *bp) +{ + xfs_failaddr_t fa; + + if (bp->b_ops == NULL) { + xchk_block_set_corrupt(sc, bp); + return; + } + if (bp->b_ops->verify_struct == NULL) { + xchk_set_incomplete(sc); + return; + } + fa = bp->b_ops->verify_struct(bp); + if (!fa) + return; + sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT; + trace_xchk_block_error(sc, xfs_buf_daddr(bp), fa); +} + +/* + * Scrub the attr/data forks of a metadata inode. The metadata inode must be + * pointed to by sc->ip and the ILOCK must be held. 
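+ *
+ * A metadata-file scrubber (the realtime bitmap checker, say) would be
+ * expected to invoke this before examining the file contents; roughly,
+ * as a sketch:
+ *
+ *     error = xchk_metadata_inode_forks(sc);
+ *     if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
+ *             return error;
+ *     (...now scan the actual file contents...)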
+ */ +int +xchk_metadata_inode_forks( + struct xfs_scrub *sc) +{ + __u32 smtype; + bool shared; + int error; + + if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) + return 0; + + /* Metadata inodes don't live on the rt device. */ + if (sc->ip->i_diflags & XFS_DIFLAG_REALTIME) { + xchk_ino_set_corrupt(sc, sc->ip->i_ino); + return 0; + } + + /* They should never participate in reflink. */ + if (xfs_is_reflink_inode(sc->ip)) { + xchk_ino_set_corrupt(sc, sc->ip->i_ino); + return 0; + } + + /* They also should never have extended attributes. */ + if (xfs_inode_hasattr(sc->ip)) { + xchk_ino_set_corrupt(sc, sc->ip->i_ino); + return 0; + } + + /* Invoke the data fork scrubber. */ + smtype = sc->sm->sm_type; + sc->sm->sm_type = XFS_SCRUB_TYPE_BMBTD; + error = xchk_bmap_data(sc); + sc->sm->sm_type = smtype; + if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)) + return error; + + /* Look for incorrect shared blocks. */ + if (xfs_has_reflink(sc->mp)) { + error = xfs_reflink_inode_has_shared_extents(sc->tp, sc->ip, + &shared); + if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, 0, + &error)) + return error; + if (shared) + xchk_ino_set_corrupt(sc, sc->ip->i_ino); + } + + return error; +} + +/* + * Try to lock an inode in violation of the usual locking order rules. For + * example, trying to get the IOLOCK while in transaction context, or just + * plain breaking AG-order or inode-order inode locking rules. Either way, + * the only way to avoid an ABBA deadlock is to use trylock and back off if + * we can't. + */ +int +xchk_ilock_inverted( + struct xfs_inode *ip, + uint lock_mode) +{ + int i; + + for (i = 0; i < 20; i++) { + if (xfs_ilock_nowait(ip, lock_mode)) + return 0; + delay(1); + } + return -EDEADLOCK; +} diff --git a/fs/xfs/scrub/common.h b/fs/xfs/scrub/common.h new file mode 100644 index 000000000..2ca80102e --- /dev/null +++ b/fs/xfs/scrub/common.h @@ -0,0 +1,152 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2017 Oracle. All Rights Reserved. + * Author: Darrick J. Wong + */ +#ifndef __XFS_SCRUB_COMMON_H__ +#define __XFS_SCRUB_COMMON_H__ + +/* + * We /could/ terminate a scrub/repair operation early. If we're not + * in a good place to continue (fatal signal, etc.) then bail out. + * Note that we're careful not to make any judgements about *error. + */ +static inline bool +xchk_should_terminate( + struct xfs_scrub *sc, + int *error) +{ + /* + * If preemption is disabled, we need to yield to the scheduler every + * few seconds so that we don't run afoul of the soft lockup watchdog + * or RCU stall detector. 
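+ *
+ * Scan loops call this once per record, in the style of:
+ *
+ *     if (xchk_should_terminate(sc, &error))
+ *             break;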
+ */ + cond_resched(); + + if (fatal_signal_pending(current)) { + if (*error == 0) + *error = -EAGAIN; + return true; + } + return false; +} + +int xchk_trans_alloc(struct xfs_scrub *sc, uint resblks); +bool xchk_process_error(struct xfs_scrub *sc, xfs_agnumber_t agno, + xfs_agblock_t bno, int *error); +bool xchk_fblock_process_error(struct xfs_scrub *sc, int whichfork, + xfs_fileoff_t offset, int *error); + +bool xchk_xref_process_error(struct xfs_scrub *sc, + xfs_agnumber_t agno, xfs_agblock_t bno, int *error); +bool xchk_fblock_xref_process_error(struct xfs_scrub *sc, + int whichfork, xfs_fileoff_t offset, int *error); + +void xchk_block_set_preen(struct xfs_scrub *sc, + struct xfs_buf *bp); +void xchk_ino_set_preen(struct xfs_scrub *sc, xfs_ino_t ino); + +void xchk_set_corrupt(struct xfs_scrub *sc); +void xchk_block_set_corrupt(struct xfs_scrub *sc, + struct xfs_buf *bp); +void xchk_ino_set_corrupt(struct xfs_scrub *sc, xfs_ino_t ino); +void xchk_fblock_set_corrupt(struct xfs_scrub *sc, int whichfork, + xfs_fileoff_t offset); + +void xchk_block_xref_set_corrupt(struct xfs_scrub *sc, + struct xfs_buf *bp); +void xchk_ino_xref_set_corrupt(struct xfs_scrub *sc, + xfs_ino_t ino); +void xchk_fblock_xref_set_corrupt(struct xfs_scrub *sc, + int whichfork, xfs_fileoff_t offset); + +void xchk_ino_set_warning(struct xfs_scrub *sc, xfs_ino_t ino); +void xchk_fblock_set_warning(struct xfs_scrub *sc, int whichfork, + xfs_fileoff_t offset); + +void xchk_set_incomplete(struct xfs_scrub *sc); +int xchk_checkpoint_log(struct xfs_mount *mp); + +/* Are we set up for a cross-referencing check? */ +bool xchk_should_check_xref(struct xfs_scrub *sc, int *error, + struct xfs_btree_cur **curpp); + +/* Setup functions */ +int xchk_setup_fs(struct xfs_scrub *sc); +int xchk_setup_ag_allocbt(struct xfs_scrub *sc); +int xchk_setup_ag_iallocbt(struct xfs_scrub *sc); +int xchk_setup_ag_rmapbt(struct xfs_scrub *sc); +int xchk_setup_ag_refcountbt(struct xfs_scrub *sc); +int xchk_setup_inode(struct xfs_scrub *sc); +int xchk_setup_inode_bmap(struct xfs_scrub *sc); +int xchk_setup_inode_bmap_data(struct xfs_scrub *sc); +int xchk_setup_directory(struct xfs_scrub *sc); +int xchk_setup_xattr(struct xfs_scrub *sc); +int xchk_setup_symlink(struct xfs_scrub *sc); +int xchk_setup_parent(struct xfs_scrub *sc); +#ifdef CONFIG_XFS_RT +int xchk_setup_rt(struct xfs_scrub *sc); +#else +static inline int +xchk_setup_rt(struct xfs_scrub *sc) +{ + return -ENOENT; +} +#endif +#ifdef CONFIG_XFS_QUOTA +int xchk_setup_quota(struct xfs_scrub *sc); +#else +static inline int +xchk_setup_quota(struct xfs_scrub *sc) +{ + return -ENOENT; +} +#endif +int xchk_setup_fscounters(struct xfs_scrub *sc); + +void xchk_ag_free(struct xfs_scrub *sc, struct xchk_ag *sa); +int xchk_ag_init(struct xfs_scrub *sc, xfs_agnumber_t agno, + struct xchk_ag *sa); + +/* + * Grab all AG resources, treating the inability to grab the perag structure as + * a fs corruption. This is intended for callers checking an ondisk reference + * to a given AG, which means that the AG must still exist. + */ +static inline int +xchk_ag_init_existing( + struct xfs_scrub *sc, + xfs_agnumber_t agno, + struct xchk_ag *sa) +{ + int error = xchk_ag_init(sc, agno, sa); + + return error == -ENOENT ? 
-EFSCORRUPTED : error; +} + +int xchk_ag_read_headers(struct xfs_scrub *sc, xfs_agnumber_t agno, + struct xchk_ag *sa); +void xchk_ag_btcur_free(struct xchk_ag *sa); +void xchk_ag_btcur_init(struct xfs_scrub *sc, struct xchk_ag *sa); +int xchk_count_rmap_ownedby_ag(struct xfs_scrub *sc, struct xfs_btree_cur *cur, + const struct xfs_owner_info *oinfo, xfs_filblks_t *blocks); + +int xchk_setup_ag_btree(struct xfs_scrub *sc, bool force_log); +int xchk_get_inode(struct xfs_scrub *sc); +int xchk_setup_inode_contents(struct xfs_scrub *sc, unsigned int resblks); +void xchk_buffer_recheck(struct xfs_scrub *sc, struct xfs_buf *bp); + +/* + * Don't bother cross-referencing if we already found corruption or cross + * referencing discrepancies. + */ +static inline bool xchk_skip_xref(struct xfs_scrub_metadata *sm) +{ + return sm->sm_flags & (XFS_SCRUB_OFLAG_CORRUPT | + XFS_SCRUB_OFLAG_XCORRUPT); +} + +int xchk_metadata_inode_forks(struct xfs_scrub *sc); +int xchk_ilock_inverted(struct xfs_inode *ip, uint lock_mode); + +#endif /* __XFS_SCRUB_COMMON_H__ */ diff --git a/fs/xfs/scrub/dabtree.c b/fs/xfs/scrub/dabtree.c new file mode 100644 index 000000000..84fe3d33d --- /dev/null +++ b/fs/xfs/scrub/dabtree.c @@ -0,0 +1,596 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2017 Oracle. All Rights Reserved. + * Author: Darrick J. Wong + */ +#include "xfs.h" +#include "xfs_fs.h" +#include "xfs_shared.h" +#include "xfs_format.h" +#include "xfs_trans_resv.h" +#include "xfs_mount.h" +#include "xfs_log_format.h" +#include "xfs_trans.h" +#include "xfs_inode.h" +#include "xfs_dir2.h" +#include "xfs_dir2_priv.h" +#include "xfs_attr_leaf.h" +#include "scrub/scrub.h" +#include "scrub/common.h" +#include "scrub/trace.h" +#include "scrub/dabtree.h" + +/* Directory/Attribute Btree */ + +/* + * Check for da btree operation errors. See the section about handling + * operational errors in common.c. + */ +bool +xchk_da_process_error( + struct xchk_da_btree *ds, + int level, + int *error) +{ + struct xfs_scrub *sc = ds->sc; + + if (*error == 0) + return true; + + switch (*error) { + case -EDEADLOCK: + /* Used to restart an op with deadlock avoidance. */ + trace_xchk_deadlock_retry(sc->ip, sc->sm, *error); + break; + case -EFSBADCRC: + case -EFSCORRUPTED: + /* Note the badness but don't abort. */ + sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT; + *error = 0; + fallthrough; + default: + trace_xchk_file_op_error(sc, ds->dargs.whichfork, + xfs_dir2_da_to_db(ds->dargs.geo, + ds->state->path.blk[level].blkno), + *error, __return_address); + break; + } + return false; +} + +/* + * Check for da btree corruption. See the section about handling + * operational errors in common.c. + */ +void +xchk_da_set_corrupt( + struct xchk_da_btree *ds, + int level) +{ + struct xfs_scrub *sc = ds->sc; + + sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT; + + trace_xchk_fblock_error(sc, ds->dargs.whichfork, + xfs_dir2_da_to_db(ds->dargs.geo, + ds->state->path.blk[level].blkno), + __return_address); +} + +static struct xfs_da_node_entry * +xchk_da_btree_node_entry( + struct xchk_da_btree *ds, + int level) +{ + struct xfs_da_state_blk *blk = &ds->state->path.blk[level]; + struct xfs_da3_icnode_hdr hdr; + + ASSERT(blk->magic == XFS_DA_NODE_MAGIC); + + xfs_da3_node_hdr_from_disk(ds->sc->mp, &hdr, blk->bp->b_addr); + return hdr.btree + blk->index; +} + +/* Scrub a da btree hash (key). 
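+ *
+ * Two invariants are enforced here: hashes must be non-decreasing as we
+ * walk the entries of a block, and no entry's hash may exceed the
+ * hashval its parent advertises for the block. For example (values
+ * invented), if the parent's node entry carries hashval 0x8000, every
+ * hash in the child block it points to must be <= 0x8000.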
*/ +int +xchk_da_btree_hash( + struct xchk_da_btree *ds, + int level, + __be32 *hashp) +{ + struct xfs_da_node_entry *entry; + xfs_dahash_t hash; + xfs_dahash_t parent_hash; + + /* Is this hash in order? */ + hash = be32_to_cpu(*hashp); + if (hash < ds->hashes[level]) + xchk_da_set_corrupt(ds, level); + ds->hashes[level] = hash; + + if (level == 0) + return 0; + + /* Is this hash no larger than the parent hash? */ + entry = xchk_da_btree_node_entry(ds, level - 1); + parent_hash = be32_to_cpu(entry->hashval); + if (parent_hash < hash) + xchk_da_set_corrupt(ds, level); + + return 0; +} + +/* + * Check a da btree pointer. Returns true if it's ok to use this + * pointer. + */ +STATIC bool +xchk_da_btree_ptr_ok( + struct xchk_da_btree *ds, + int level, + xfs_dablk_t blkno) +{ + if (blkno < ds->lowest || (ds->highest != 0 && blkno >= ds->highest)) { + xchk_da_set_corrupt(ds, level); + return false; + } + + return true; +} + +/* + * The da btree scrubber can handle leaf1 blocks as a degenerate + * form of leafn blocks. Since the regular da code doesn't handle + * leaf1, we must multiplex the verifiers. + */ +static void +xchk_da_btree_read_verify( + struct xfs_buf *bp) +{ + struct xfs_da_blkinfo *info = bp->b_addr; + + switch (be16_to_cpu(info->magic)) { + case XFS_DIR2_LEAF1_MAGIC: + case XFS_DIR3_LEAF1_MAGIC: + bp->b_ops = &xfs_dir3_leaf1_buf_ops; + bp->b_ops->verify_read(bp); + return; + default: + /* + * xfs_da3_node_buf_ops already know how to handle + * DA*_NODE, ATTR*_LEAF, and DIR*_LEAFN blocks. + */ + bp->b_ops = &xfs_da3_node_buf_ops; + bp->b_ops->verify_read(bp); + return; + } +} +static void +xchk_da_btree_write_verify( + struct xfs_buf *bp) +{ + struct xfs_da_blkinfo *info = bp->b_addr; + + switch (be16_to_cpu(info->magic)) { + case XFS_DIR2_LEAF1_MAGIC: + case XFS_DIR3_LEAF1_MAGIC: + bp->b_ops = &xfs_dir3_leaf1_buf_ops; + bp->b_ops->verify_write(bp); + return; + default: + /* + * xfs_da3_node_buf_ops already know how to handle + * DA*_NODE, ATTR*_LEAF, and DIR*_LEAFN blocks. + */ + bp->b_ops = &xfs_da3_node_buf_ops; + bp->b_ops->verify_write(bp); + return; + } +} +static void * +xchk_da_btree_verify( + struct xfs_buf *bp) +{ + struct xfs_da_blkinfo *info = bp->b_addr; + + switch (be16_to_cpu(info->magic)) { + case XFS_DIR2_LEAF1_MAGIC: + case XFS_DIR3_LEAF1_MAGIC: + bp->b_ops = &xfs_dir3_leaf1_buf_ops; + return bp->b_ops->verify_struct(bp); + default: + bp->b_ops = &xfs_da3_node_buf_ops; + return bp->b_ops->verify_struct(bp); + } +} + +static const struct xfs_buf_ops xchk_da_btree_buf_ops = { + .name = "xchk_da_btree", + .verify_read = xchk_da_btree_read_verify, + .verify_write = xchk_da_btree_write_verify, + .verify_struct = xchk_da_btree_verify, +}; + +/* Check a block's sibling. */ +STATIC int +xchk_da_btree_block_check_sibling( + struct xchk_da_btree *ds, + int level, + int direction, + xfs_dablk_t sibling) +{ + struct xfs_da_state_path *path = &ds->state->path; + struct xfs_da_state_path *altpath = &ds->state->altpath; + int retval; + int plevel; + int error; + + memcpy(altpath, path, sizeof(ds->state->altpath)); + + /* + * If the pointer is null, we shouldn't be able to move the upper + * level pointer anywhere. + */ + if (sibling == 0) { + error = xfs_da3_path_shift(ds->state, altpath, direction, + false, &retval); + if (error == 0 && retval == 0) + xchk_da_set_corrupt(ds, level); + error = 0; + goto out; + } + + /* Move the alternate cursor one block in the direction given. 
*/ + error = xfs_da3_path_shift(ds->state, altpath, direction, false, + &retval); + if (!xchk_da_process_error(ds, level, &error)) + goto out; + if (retval) { + xchk_da_set_corrupt(ds, level); + goto out; + } + if (altpath->blk[level].bp) + xchk_buffer_recheck(ds->sc, altpath->blk[level].bp); + + /* Compare upper level pointer to sibling pointer. */ + if (altpath->blk[level].blkno != sibling) + xchk_da_set_corrupt(ds, level); + +out: + /* Free all buffers in the altpath that aren't referenced from path. */ + for (plevel = 0; plevel < altpath->active; plevel++) { + if (altpath->blk[plevel].bp == NULL || + (plevel < path->active && + altpath->blk[plevel].bp == path->blk[plevel].bp)) + continue; + + xfs_trans_brelse(ds->dargs.trans, altpath->blk[plevel].bp); + altpath->blk[plevel].bp = NULL; + } + + return error; +} + +/* Check a block's sibling pointers. */ +STATIC int +xchk_da_btree_block_check_siblings( + struct xchk_da_btree *ds, + int level, + struct xfs_da_blkinfo *hdr) +{ + xfs_dablk_t forw; + xfs_dablk_t back; + int error = 0; + + forw = be32_to_cpu(hdr->forw); + back = be32_to_cpu(hdr->back); + + /* Top level blocks should not have sibling pointers. */ + if (level == 0) { + if (forw != 0 || back != 0) + xchk_da_set_corrupt(ds, level); + return 0; + } + + /* + * Check back (left) and forw (right) pointers. These functions + * absorb error codes for us. + */ + error = xchk_da_btree_block_check_sibling(ds, level, 0, back); + if (error) + goto out; + error = xchk_da_btree_block_check_sibling(ds, level, 1, forw); + +out: + memset(&ds->state->altpath, 0, sizeof(ds->state->altpath)); + return error; +} + +/* Load a dir/attribute block from a btree. */ +STATIC int +xchk_da_btree_block( + struct xchk_da_btree *ds, + int level, + xfs_dablk_t blkno) +{ + struct xfs_da_state_blk *blk; + struct xfs_da_intnode *node; + struct xfs_da_node_entry *btree; + struct xfs_da3_blkinfo *hdr3; + struct xfs_da_args *dargs = &ds->dargs; + struct xfs_inode *ip = ds->dargs.dp; + xfs_ino_t owner; + int *pmaxrecs; + struct xfs_da3_icnode_hdr nodehdr; + int error = 0; + + blk = &ds->state->path.blk[level]; + ds->state->path.active = level + 1; + + /* Release old block. */ + if (blk->bp) { + xfs_trans_brelse(dargs->trans, blk->bp); + blk->bp = NULL; + } + + /* Check the pointer. */ + blk->blkno = blkno; + if (!xchk_da_btree_ptr_ok(ds, level, blkno)) + goto out_nobuf; + + /* Read the buffer. */ + error = xfs_da_read_buf(dargs->trans, dargs->dp, blk->blkno, + XFS_DABUF_MAP_HOLE_OK, &blk->bp, dargs->whichfork, + &xchk_da_btree_buf_ops); + if (!xchk_da_process_error(ds, level, &error)) + goto out_nobuf; + if (blk->bp) + xchk_buffer_recheck(ds->sc, blk->bp); + + /* + * We didn't find a dir btree root block, which means that + * there's no LEAF1/LEAFN tree (at least not where it's supposed + * to be), so jump out now. + */ + if (ds->dargs.whichfork == XFS_DATA_FORK && level == 0 && + blk->bp == NULL) + goto out_nobuf; + + /* It's /not/ ok for attr trees not to have a da btree. */ + if (blk->bp == NULL) { + xchk_da_set_corrupt(ds, level); + goto out_nobuf; + } + + hdr3 = blk->bp->b_addr; + blk->magic = be16_to_cpu(hdr3->hdr.magic); + pmaxrecs = &ds->maxrecs[level]; + + /* We only started zeroing the header on v5 filesystems. */ + if (xfs_has_crc(ds->sc->mp) && hdr3->hdr.pad) + xchk_da_set_corrupt(ds, level); + + /* Check the owner. */ + if (xfs_has_crc(ip->i_mount)) { + owner = be64_to_cpu(hdr3->owner); + if (owner != ip->i_ino) + xchk_da_set_corrupt(ds, level); + } + + /* Check the siblings. 
*/ + error = xchk_da_btree_block_check_siblings(ds, level, &hdr3->hdr); + if (error) + goto out; + + /* Interpret the buffer. */ + switch (blk->magic) { + case XFS_ATTR_LEAF_MAGIC: + case XFS_ATTR3_LEAF_MAGIC: + xfs_trans_buf_set_type(dargs->trans, blk->bp, + XFS_BLFT_ATTR_LEAF_BUF); + blk->magic = XFS_ATTR_LEAF_MAGIC; + blk->hashval = xfs_attr_leaf_lasthash(blk->bp, pmaxrecs); + if (ds->tree_level != 0) + xchk_da_set_corrupt(ds, level); + break; + case XFS_DIR2_LEAFN_MAGIC: + case XFS_DIR3_LEAFN_MAGIC: + xfs_trans_buf_set_type(dargs->trans, blk->bp, + XFS_BLFT_DIR_LEAFN_BUF); + blk->magic = XFS_DIR2_LEAFN_MAGIC; + blk->hashval = xfs_dir2_leaf_lasthash(ip, blk->bp, pmaxrecs); + if (ds->tree_level != 0) + xchk_da_set_corrupt(ds, level); + break; + case XFS_DIR2_LEAF1_MAGIC: + case XFS_DIR3_LEAF1_MAGIC: + xfs_trans_buf_set_type(dargs->trans, blk->bp, + XFS_BLFT_DIR_LEAF1_BUF); + blk->magic = XFS_DIR2_LEAF1_MAGIC; + blk->hashval = xfs_dir2_leaf_lasthash(ip, blk->bp, pmaxrecs); + if (ds->tree_level != 0) + xchk_da_set_corrupt(ds, level); + break; + case XFS_DA_NODE_MAGIC: + case XFS_DA3_NODE_MAGIC: + xfs_trans_buf_set_type(dargs->trans, blk->bp, + XFS_BLFT_DA_NODE_BUF); + blk->magic = XFS_DA_NODE_MAGIC; + node = blk->bp->b_addr; + xfs_da3_node_hdr_from_disk(ip->i_mount, &nodehdr, node); + btree = nodehdr.btree; + *pmaxrecs = nodehdr.count; + blk->hashval = be32_to_cpu(btree[*pmaxrecs - 1].hashval); + if (level == 0) { + if (nodehdr.level >= XFS_DA_NODE_MAXDEPTH) { + xchk_da_set_corrupt(ds, level); + goto out_freebp; + } + ds->tree_level = nodehdr.level; + } else { + if (ds->tree_level != nodehdr.level) { + xchk_da_set_corrupt(ds, level); + goto out_freebp; + } + } + + /* XXX: Check hdr3.pad32 once we know how to fix it. */ + break; + default: + xchk_da_set_corrupt(ds, level); + goto out_freebp; + } + + /* + * If we've been handed a block that is below the dabtree root, does + * its hashval match what the parent block expected to see? + */ + if (level > 0) { + struct xfs_da_node_entry *key; + + key = xchk_da_btree_node_entry(ds, level - 1); + if (be32_to_cpu(key->hashval) != blk->hashval) { + xchk_da_set_corrupt(ds, level); + goto out_freebp; + } + } + +out: + return error; +out_freebp: + xfs_trans_brelse(dargs->trans, blk->bp); + blk->bp = NULL; +out_nobuf: + blk->blkno = 0; + return error; +} + +/* Visit all nodes and leaves of a da btree. */ +int +xchk_da_btree( + struct xfs_scrub *sc, + int whichfork, + xchk_da_btree_rec_fn scrub_fn, + void *private) +{ + struct xchk_da_btree *ds; + struct xfs_mount *mp = sc->mp; + struct xfs_da_state_blk *blks; + struct xfs_da_node_entry *key; + xfs_dablk_t blkno; + int level; + int error; + + /* Skip short format data structures; no btree to scan. */ + if (!xfs_ifork_has_extents(xfs_ifork_ptr(sc->ip, whichfork))) + return 0; + + /* Set up initial da state. */ + ds = kmem_zalloc(sizeof(struct xchk_da_btree), KM_NOFS | KM_MAYFAIL); + if (!ds) + return -ENOMEM; + ds->dargs.dp = sc->ip; + ds->dargs.whichfork = whichfork; + ds->dargs.trans = sc->tp; + ds->dargs.op_flags = XFS_DA_OP_OKNOENT; + ds->state = xfs_da_state_alloc(&ds->dargs); + ds->sc = sc; + ds->private = private; + if (whichfork == XFS_ATTR_FORK) { + ds->dargs.geo = mp->m_attr_geo; + ds->lowest = 0; + ds->highest = 0; + } else { + ds->dargs.geo = mp->m_dir_geo; + ds->lowest = ds->dargs.geo->leafblk; + ds->highest = ds->dargs.geo->freeblk; + } + blkno = ds->lowest; + level = 0; + + /* Find the root of the da tree, if present. 
*/ + blks = ds->state->path.blk; + error = xchk_da_btree_block(ds, level, blkno); + if (error) + goto out_state; + /* + * We didn't find a block at ds->lowest, which means that there's + * no LEAF1/LEAFN tree (at least not where it's supposed to be), + * so jump out now. + */ + if (blks[level].bp == NULL) + goto out_state; + + blks[level].index = 0; + while (level >= 0 && level < XFS_DA_NODE_MAXDEPTH) { + /* Handle leaf block. */ + if (blks[level].magic != XFS_DA_NODE_MAGIC) { + /* End of leaf, pop back towards the root. */ + if (blks[level].index >= ds->maxrecs[level]) { + if (level > 0) + blks[level - 1].index++; + ds->tree_level++; + level--; + continue; + } + + /* Dispatch record scrubbing. */ + error = scrub_fn(ds, level); + if (error) + break; + if (xchk_should_terminate(sc, &error) || + (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)) + break; + + blks[level].index++; + continue; + } + + + /* End of node, pop back towards the root. */ + if (blks[level].index >= ds->maxrecs[level]) { + if (level > 0) + blks[level - 1].index++; + ds->tree_level++; + level--; + continue; + } + + /* Hashes in order for scrub? */ + key = xchk_da_btree_node_entry(ds, level); + error = xchk_da_btree_hash(ds, level, &key->hashval); + if (error) + goto out; + + /* Drill another level deeper. */ + blkno = be32_to_cpu(key->before); + level++; + if (level >= XFS_DA_NODE_MAXDEPTH) { + /* Too deep! */ + xchk_da_set_corrupt(ds, level - 1); + break; + } + ds->tree_level--; + error = xchk_da_btree_block(ds, level, blkno); + if (error) + goto out; + if (blks[level].bp == NULL) + goto out; + + blks[level].index = 0; + } + +out: + /* Release all the buffers we're tracking. */ + for (level = 0; level < XFS_DA_NODE_MAXDEPTH; level++) { + if (blks[level].bp == NULL) + continue; + xfs_trans_brelse(sc->tp, blks[level].bp); + blks[level].bp = NULL; + } + +out_state: + xfs_da_state_free(ds->state); + kmem_free(ds); + return error; +} diff --git a/fs/xfs/scrub/dabtree.h b/fs/xfs/scrub/dabtree.h new file mode 100644 index 000000000..1f3515c6d --- /dev/null +++ b/fs/xfs/scrub/dabtree.h @@ -0,0 +1,43 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2017 Oracle. All Rights Reserved. + * Author: Darrick J. Wong + */ +#ifndef __XFS_SCRUB_DABTREE_H__ +#define __XFS_SCRUB_DABTREE_H__ + +/* dir/attr btree */ + +struct xchk_da_btree { + struct xfs_da_args dargs; + xfs_dahash_t hashes[XFS_DA_NODE_MAXDEPTH]; + int maxrecs[XFS_DA_NODE_MAXDEPTH]; + struct xfs_da_state *state; + struct xfs_scrub *sc; + void *private; + + /* + * Lowest and highest directory block address in which we expect + * to find dir/attr btree node blocks. For a directory this + * (presumably) means between LEAF_OFFSET and FREE_OFFSET; for + * attributes there is no limit. + */ + xfs_dablk_t lowest; + xfs_dablk_t highest; + + int tree_level; +}; + +typedef int (*xchk_da_btree_rec_fn)(struct xchk_da_btree *ds, int level); + +/* Check for da btree operation errors. */ +bool xchk_da_process_error(struct xchk_da_btree *ds, int level, int *error); + +/* Check for da btree corruption. 
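+ * Corruption is reported against the file block of the offending level.
+ * A record callback passed to xchk_da_btree() might look like this
+ * sketch (xchk_foo_rec is a hypothetical scrubber):
+ *
+ *     STATIC int
+ *     xchk_foo_rec(struct xchk_da_btree *ds, int level)
+ *     {
+ *             (decode the entry at ds->state->path.blk[level].index,
+ *              then call xchk_da_set_corrupt(ds, level) on anything
+ *              that looks wrong)
+ *             return 0;
+ *     }
+ *
+ *     error = xchk_da_btree(sc, XFS_DATA_FORK, xchk_foo_rec, NULL);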
 */
+void xchk_da_set_corrupt(struct xchk_da_btree *ds, int level);
+
+int xchk_da_btree_hash(struct xchk_da_btree *ds, int level, __be32 *hashp);
+int xchk_da_btree(struct xfs_scrub *sc, int whichfork,
+ xchk_da_btree_rec_fn scrub_fn, void *private);
+
+#endif /* __XFS_SCRUB_DABTREE_H__ */
diff --git a/fs/xfs/scrub/dir.c b/fs/xfs/scrub/dir.c
new file mode 100644
index 000000000..5c87800ab
--- /dev/null
+++ b/fs/xfs/scrub/dir.c
@@ -0,0 +1,876 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2017 Oracle. All Rights Reserved.
+ * Author: Darrick J. Wong
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_mount.h"
+#include "xfs_log_format.h"
+#include "xfs_trans.h"
+#include "xfs_inode.h"
+#include "xfs_icache.h"
+#include "xfs_dir2.h"
+#include "xfs_dir2_priv.h"
+#include "scrub/scrub.h"
+#include "scrub/common.h"
+#include "scrub/dabtree.h"
+
+/* Set us up to scrub directories. */
+int
+xchk_setup_directory(
+ struct xfs_scrub *sc)
+{
+ return xchk_setup_inode_contents(sc, 0);
+}
+
+/* Directories */
+
+/* Scrub a directory entry. */
+
+struct xchk_dir_ctx {
+ /* VFS fill-directory iterator */
+ struct dir_context dir_iter;
+
+ struct xfs_scrub *sc;
+};
+
+/* Check that an inode's mode matches a given DT_ type. */
+STATIC int
+xchk_dir_check_ftype(
+ struct xchk_dir_ctx *sdc,
+ xfs_fileoff_t offset,
+ xfs_ino_t inum,
+ int dtype)
+{
+ struct xfs_mount *mp = sdc->sc->mp;
+ struct xfs_inode *ip;
+ int ino_dtype;
+ int error = 0;
+
+ if (!xfs_has_ftype(mp)) {
+ if (dtype != DT_UNKNOWN && dtype != DT_DIR)
+ xchk_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK,
+ offset);
+ goto out;
+ }
+
+ /*
+ * Grab the inode pointed to by the dirent. We release the
+ * inode before we cancel the scrub transaction. Since we don't
+ * know a priori that releasing the inode won't trigger
+ * eofblocks cleanup (which allocates what would be a nested
+ * transaction), we can't use DONTCACHE here because DONTCACHE
+ * inodes can trigger immediate inactive cleanup of the inode.
+ *
+ * If _iget returns -EINVAL or -ENOENT then the child inode number is
+ * garbage and the directory is corrupt. If _iget returns
+ * -EFSCORRUPTED or -EFSBADCRC then the child is corrupt which is a
+ * cross referencing error. Any other error is an operational error.
+ */
+ error = xfs_iget(mp, sdc->sc->tp, inum, 0, 0, &ip);
+ if (error == -EINVAL || error == -ENOENT) {
+ error = -EFSCORRUPTED;
+ xchk_fblock_process_error(sdc->sc, XFS_DATA_FORK, 0, &error);
+ goto out;
+ }
+ if (!xchk_fblock_xref_process_error(sdc->sc, XFS_DATA_FORK, offset,
+ &error))
+ goto out;
+
+ /* Convert mode to the DT_* values that dir_emit uses. */
+ ino_dtype = xfs_dir3_get_dtype(mp,
+ xfs_mode_to_ftype(VFS_I(ip)->i_mode));
+ if (ino_dtype != dtype)
+ xchk_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK, offset);
+ xfs_irele(ip);
+out:
+ return error;
+}
+
+/*
+ * Scrub a single directory entry.
+ *
+ * We use the VFS directory iterator (i.e. readdir) to call this
+ * function for every directory entry in a directory. Once we're here,
+ * we check the inode number to make sure it's sane, then we check that
+ * we can look up this filename. Finally, we check the ftype.
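+ *
+ * The wiring is the usual VFS iteration pattern; a directory scrubber
+ * would be expected to drive it roughly like this (sketch only; the
+ * buffer size is assumed):
+ *
+ *     struct xchk_dir_ctx sdc = {
+ *             .dir_iter.actor = xchk_dir_actor,
+ *             .sc = sc,
+ *     };
+ *
+ *     error = xfs_readdir(sc->tp, sc->ip, &sdc.dir_iter, bufsize);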
+ */ +STATIC bool +xchk_dir_actor( + struct dir_context *dir_iter, + const char *name, + int namelen, + loff_t pos, + u64 ino, + unsigned type) +{ + struct xfs_mount *mp; + struct xfs_inode *ip; + struct xchk_dir_ctx *sdc; + struct xfs_name xname; + xfs_ino_t lookup_ino; + xfs_dablk_t offset; + bool checked_ftype = false; + int error = 0; + + sdc = container_of(dir_iter, struct xchk_dir_ctx, dir_iter); + ip = sdc->sc->ip; + mp = ip->i_mount; + offset = xfs_dir2_db_to_da(mp->m_dir_geo, + xfs_dir2_dataptr_to_db(mp->m_dir_geo, pos)); + + if (xchk_should_terminate(sdc->sc, &error)) + return !error; + + /* Does this inode number make sense? */ + if (!xfs_verify_dir_ino(mp, ino)) { + xchk_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK, offset); + goto out; + } + + /* Does this name make sense? */ + if (!xfs_dir2_namecheck(name, namelen)) { + xchk_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK, offset); + goto out; + } + + if (!strncmp(".", name, namelen)) { + /* If this is "." then check that the inum matches the dir. */ + if (xfs_has_ftype(mp) && type != DT_DIR) + xchk_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK, + offset); + checked_ftype = true; + if (ino != ip->i_ino) + xchk_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK, + offset); + } else if (!strncmp("..", name, namelen)) { + /* + * If this is ".." in the root inode, check that the inum + * matches this dir. + */ + if (xfs_has_ftype(mp) && type != DT_DIR) + xchk_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK, + offset); + checked_ftype = true; + if (ip->i_ino == mp->m_sb.sb_rootino && ino != ip->i_ino) + xchk_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK, + offset); + } + + /* Verify that we can look up this name by hash. */ + xname.name = name; + xname.len = namelen; + xname.type = XFS_DIR3_FT_UNKNOWN; + + error = xfs_dir_lookup(sdc->sc->tp, ip, &xname, &lookup_ino, NULL); + /* ENOENT means the hash lookup failed and the dir is corrupt */ + if (error == -ENOENT) + error = -EFSCORRUPTED; + if (!xchk_fblock_process_error(sdc->sc, XFS_DATA_FORK, offset, + &error)) + goto out; + if (lookup_ino != ino) { + xchk_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK, offset); + goto out; + } + + /* Verify the file type. This function absorbs error codes. */ + if (!checked_ftype) { + error = xchk_dir_check_ftype(sdc, offset, lookup_ino, type); + if (error) + goto out; + } +out: + /* + * A negative error code returned here is supposed to cause the + * dir_emit caller (xfs_readdir) to abort the directory iteration + * and return zero to xchk_directory. + */ + if (error == 0 && sdc->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) + return false; + return !error; +} + +/* Scrub a directory btree record. */ +STATIC int +xchk_dir_rec( + struct xchk_da_btree *ds, + int level) +{ + struct xfs_da_state_blk *blk = &ds->state->path.blk[level]; + struct xfs_mount *mp = ds->state->mp; + struct xfs_inode *dp = ds->dargs.dp; + struct xfs_da_geometry *geo = mp->m_dir_geo; + struct xfs_dir2_data_entry *dent; + struct xfs_buf *bp; + struct xfs_dir2_leaf_entry *ent; + unsigned int end; + unsigned int iter_off; + xfs_ino_t ino; + xfs_dablk_t rec_bno; + xfs_dir2_db_t db; + xfs_dir2_data_aoff_t off; + xfs_dir2_dataptr_t ptr; + xfs_dahash_t calc_hash; + xfs_dahash_t hash; + struct xfs_dir3_icleaf_hdr hdr; + unsigned int tag; + int error; + + ASSERT(blk->magic == XFS_DIR2_LEAF1_MAGIC || + blk->magic == XFS_DIR2_LEAFN_MAGIC); + + xfs_dir2_leaf_hdr_from_disk(mp, &hdr, blk->bp->b_addr); + ent = hdr.ents + blk->index; + + /* Check the hash of the entry. 
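+ * (The leaf entry's 32-bit address that we decode just below packs the
+ * directory data block number and the offset within it in units of 8
+ * bytes; with 4096-byte directory blocks -- an invented geometry, for
+ * illustration -- a dataptr of 513 names byte 4104, i.e. db 1, off 8.)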
*/ + error = xchk_da_btree_hash(ds, level, &ent->hashval); + if (error) + goto out; + + /* Valid hash pointer? */ + ptr = be32_to_cpu(ent->address); + if (ptr == 0) + return 0; + + /* Find the directory entry's location. */ + db = xfs_dir2_dataptr_to_db(geo, ptr); + off = xfs_dir2_dataptr_to_off(geo, ptr); + rec_bno = xfs_dir2_db_to_da(geo, db); + + if (rec_bno >= geo->leafblk) { + xchk_da_set_corrupt(ds, level); + goto out; + } + error = xfs_dir3_data_read(ds->dargs.trans, dp, rec_bno, + XFS_DABUF_MAP_HOLE_OK, &bp); + if (!xchk_fblock_process_error(ds->sc, XFS_DATA_FORK, rec_bno, + &error)) + goto out; + if (!bp) { + xchk_fblock_set_corrupt(ds->sc, XFS_DATA_FORK, rec_bno); + goto out; + } + xchk_buffer_recheck(ds->sc, bp); + + if (ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) + goto out_relse; + + dent = bp->b_addr + off; + + /* Make sure we got a real directory entry. */ + iter_off = geo->data_entry_offset; + end = xfs_dir3_data_end_offset(geo, bp->b_addr); + if (!end) { + xchk_fblock_set_corrupt(ds->sc, XFS_DATA_FORK, rec_bno); + goto out_relse; + } + for (;;) { + struct xfs_dir2_data_entry *dep = bp->b_addr + iter_off; + struct xfs_dir2_data_unused *dup = bp->b_addr + iter_off; + + if (iter_off >= end) { + xchk_fblock_set_corrupt(ds->sc, XFS_DATA_FORK, rec_bno); + goto out_relse; + } + + if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) { + iter_off += be16_to_cpu(dup->length); + continue; + } + if (dep == dent) + break; + iter_off += xfs_dir2_data_entsize(mp, dep->namelen); + } + + /* Retrieve the entry, sanity check it, and compare hashes. */ + ino = be64_to_cpu(dent->inumber); + hash = be32_to_cpu(ent->hashval); + tag = be16_to_cpup(xfs_dir2_data_entry_tag_p(mp, dent)); + if (!xfs_verify_dir_ino(mp, ino) || tag != off) + xchk_fblock_set_corrupt(ds->sc, XFS_DATA_FORK, rec_bno); + if (dent->namelen == 0) { + xchk_fblock_set_corrupt(ds->sc, XFS_DATA_FORK, rec_bno); + goto out_relse; + } + calc_hash = xfs_da_hashname(dent->name, dent->namelen); + if (calc_hash != hash) + xchk_fblock_set_corrupt(ds->sc, XFS_DATA_FORK, rec_bno); + +out_relse: + xfs_trans_brelse(ds->dargs.trans, bp); +out: + return error; +} + +/* + * Is this unused entry either in the bestfree or smaller than all of + * them? We've already checked that the bestfrees are sorted longest to + * shortest, and that there aren't any bogus entries. + */ +STATIC void +xchk_directory_check_free_entry( + struct xfs_scrub *sc, + xfs_dablk_t lblk, + struct xfs_dir2_data_free *bf, + struct xfs_dir2_data_unused *dup) +{ + struct xfs_dir2_data_free *dfp; + unsigned int dup_length; + + dup_length = be16_to_cpu(dup->length); + + /* Unused entry is shorter than any of the bestfrees */ + if (dup_length < be16_to_cpu(bf[XFS_DIR2_DATA_FD_COUNT - 1].length)) + return; + + for (dfp = &bf[XFS_DIR2_DATA_FD_COUNT - 1]; dfp >= bf; dfp--) + if (dup_length == be16_to_cpu(dfp->length)) + return; + + /* Unused entry should be in the bestfrees but wasn't found. */ + xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk); +} + +/* Check free space info in a directory data block. 
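+ * The data block header caches the longest free regions in its + * "bestfree" array so that dirent insertion can find space without + * walking the whole block; each cached entry must match a free region + * that is actually present.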
*/ +STATIC int +xchk_directory_data_bestfree( + struct xfs_scrub *sc, + xfs_dablk_t lblk, + bool is_block) +{ + struct xfs_dir2_data_unused *dup; + struct xfs_dir2_data_free *dfp; + struct xfs_buf *bp; + struct xfs_dir2_data_free *bf; + struct xfs_mount *mp = sc->mp; + u16 tag; + unsigned int nr_bestfrees = 0; + unsigned int nr_frees = 0; + unsigned int smallest_bestfree; + int newlen; + unsigned int offset; + unsigned int end; + int error; + + if (is_block) { + /* dir block format */ + if (lblk != XFS_B_TO_FSBT(mp, XFS_DIR2_DATA_OFFSET)) + xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk); + error = xfs_dir3_block_read(sc->tp, sc->ip, &bp); + } else { + /* dir data format */ + error = xfs_dir3_data_read(sc->tp, sc->ip, lblk, 0, &bp); + } + if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, lblk, &error)) + goto out; + xchk_buffer_recheck(sc, bp); + + /* XXX: Check xfs_dir3_data_hdr.pad is zero once we start setting it. */ + + if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) + goto out_buf; + + /* Do the bestfrees correspond to actual free space? */ + bf = xfs_dir2_data_bestfree_p(mp, bp->b_addr); + smallest_bestfree = UINT_MAX; + for (dfp = &bf[0]; dfp < &bf[XFS_DIR2_DATA_FD_COUNT]; dfp++) { + offset = be16_to_cpu(dfp->offset); + if (offset == 0) + continue; + if (offset >= mp->m_dir_geo->blksize) { + xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk); + goto out_buf; + } + dup = bp->b_addr + offset; + tag = be16_to_cpu(*xfs_dir2_data_unused_tag_p(dup)); + + /* bestfree doesn't match the entry it points at? */ + if (dup->freetag != cpu_to_be16(XFS_DIR2_DATA_FREE_TAG) || + be16_to_cpu(dup->length) != be16_to_cpu(dfp->length) || + tag != offset) { + xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk); + goto out_buf; + } + + /* bestfree records should be ordered largest to smallest */ + if (smallest_bestfree < be16_to_cpu(dfp->length)) { + xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk); + goto out_buf; + } + + smallest_bestfree = be16_to_cpu(dfp->length); + nr_bestfrees++; + } + + /* Make sure the bestfrees are actually the best free spaces. */ + offset = mp->m_dir_geo->data_entry_offset; + end = xfs_dir3_data_end_offset(mp->m_dir_geo, bp->b_addr); + + /* Iterate the entries, stopping when we hit or go past the end. */ + while (offset < end) { + dup = bp->b_addr + offset; + + /* Skip real entries */ + if (dup->freetag != cpu_to_be16(XFS_DIR2_DATA_FREE_TAG)) { + struct xfs_dir2_data_entry *dep = bp->b_addr + offset; + + newlen = xfs_dir2_data_entsize(mp, dep->namelen); + if (newlen <= 0) { + xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, + lblk); + goto out_buf; + } + offset += newlen; + continue; + } + + /* Spot check this free entry */ + tag = be16_to_cpu(*xfs_dir2_data_unused_tag_p(dup)); + if (tag != offset) { + xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk); + goto out_buf; + } + + /* + * Either this entry is a bestfree or it's smaller than + * any of the bestfrees. + */ + xchk_directory_check_free_entry(sc, lblk, bf, dup); + if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) + goto out_buf; + + /* Move on. */ + newlen = be16_to_cpu(dup->length); + if (newlen <= 0) { + xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk); + goto out_buf; + } + offset += newlen; + if (offset <= end) + nr_frees++; + } + + /* We're required to fill all the space. */ + if (offset != end) + xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk); + + /* Did we see at least as many free slots as there are bestfrees? 
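+ * (nr_bestfrees counts the in-use bestfree slots and nr_frees counts + * the free regions we actually walked, so seeing fewer free regions + * than bestfree slots means at least one slot cannot correspond to a + * real free region.)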
*/ + if (nr_frees < nr_bestfrees) + xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk); +out_buf: + xfs_trans_brelse(sc->tp, bp); +out: + return error; +} + +/* + * Does the free space length in the free space index block ($len) match + * the longest length in the directory data block's bestfree array? + * Assume that we've already checked that the data block's bestfree + * array is in order. + */ +STATIC void +xchk_directory_check_freesp( + struct xfs_scrub *sc, + xfs_dablk_t lblk, + struct xfs_buf *dbp, + unsigned int len) +{ + struct xfs_dir2_data_free *dfp; + + dfp = xfs_dir2_data_bestfree_p(sc->mp, dbp->b_addr); + + if (len != be16_to_cpu(dfp->length)) + xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk); + + if (len > 0 && be16_to_cpu(dfp->offset) == 0) + xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk); +} + +/* Check free space info in a directory leaf1 block. */ +STATIC int +xchk_directory_leaf1_bestfree( + struct xfs_scrub *sc, + struct xfs_da_args *args, + xfs_dir2_db_t last_data_db, + xfs_dablk_t lblk) +{ + struct xfs_dir3_icleaf_hdr leafhdr; + struct xfs_dir2_leaf_tail *ltp; + struct xfs_dir2_leaf *leaf; + struct xfs_buf *dbp; + struct xfs_buf *bp; + struct xfs_da_geometry *geo = sc->mp->m_dir_geo; + __be16 *bestp; + __u16 best; + __u32 hash; + __u32 lasthash = 0; + __u32 bestcount; + unsigned int stale = 0; + int i; + int error; + + /* Read the free space block. */ + error = xfs_dir3_leaf_read(sc->tp, sc->ip, lblk, &bp); + if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, lblk, &error)) + return error; + xchk_buffer_recheck(sc, bp); + + leaf = bp->b_addr; + xfs_dir2_leaf_hdr_from_disk(sc->ip->i_mount, &leafhdr, leaf); + ltp = xfs_dir2_leaf_tail_p(geo, leaf); + bestcount = be32_to_cpu(ltp->bestcount); + bestp = xfs_dir2_leaf_bests_p(ltp); + + if (xfs_has_crc(sc->mp)) { + struct xfs_dir3_leaf_hdr *hdr3 = bp->b_addr; + + if (hdr3->pad != cpu_to_be32(0)) + xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk); + } + + /* + * There must be enough bestfree slots to cover all the directory data + * blocks that we scanned. It is possible for there to be a hole + * between the last data block and i_disk_size. This seems like an + * oversight to the scrub author, but as we have been writing out + * directories like this (and xfs_repair doesn't mind them) for years, + * that's what we have to check. + */ + if (bestcount != last_data_db + 1) { + xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk); + goto out; + } + + /* Is the leaf count even remotely sane? */ + if (leafhdr.count > geo->leaf_max_ents) { + xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk); + goto out; + } + + /* Leaves and bests don't overlap in leaf format. */ + if ((char *)&leafhdr.ents[leafhdr.count] > (char *)bestp) { + xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk); + goto out; + } + + /* Check hash value order, count stale entries. */ + for (i = 0; i < leafhdr.count; i++) { + hash = be32_to_cpu(leafhdr.ents[i].hashval); + if (i > 0 && lasthash > hash) + xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk); + lasthash = hash; + if (leafhdr.ents[i].address == + cpu_to_be32(XFS_DIR2_NULL_DATAPTR)) + stale++; + } + if (leafhdr.stale != stale) + xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk); + if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) + goto out; + + /* Check all the bestfree entries. 
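+ * The leaf1 tail stores one "best" length per data block; each one + * must match the longest free region recorded in that data block's + * own bestfree array, or be NULLDATAOFF if the block is a hole.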
*/ + for (i = 0; i < bestcount; i++, bestp++) { + best = be16_to_cpu(*bestp); + error = xfs_dir3_data_read(sc->tp, sc->ip, + xfs_dir2_db_to_da(args->geo, i), + XFS_DABUF_MAP_HOLE_OK, + &dbp); + if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, lblk, + &error)) + break; + + if (!dbp) { + if (best != NULLDATAOFF) { + xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, + lblk); + break; + } + continue; + } + + if (best == NULLDATAOFF) + xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk); + else + xchk_directory_check_freesp(sc, lblk, dbp, best); + xfs_trans_brelse(sc->tp, dbp); + if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) + break; + } +out: + xfs_trans_brelse(sc->tp, bp); + return error; +} + +/* Check free space info in a directory freespace block. */ +STATIC int +xchk_directory_free_bestfree( + struct xfs_scrub *sc, + struct xfs_da_args *args, + xfs_dablk_t lblk) +{ + struct xfs_dir3_icfree_hdr freehdr; + struct xfs_buf *dbp; + struct xfs_buf *bp; + __u16 best; + unsigned int stale = 0; + int i; + int error; + + /* Read the free space block */ + error = xfs_dir2_free_read(sc->tp, sc->ip, lblk, &bp); + if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, lblk, &error)) + return error; + xchk_buffer_recheck(sc, bp); + + if (xfs_has_crc(sc->mp)) { + struct xfs_dir3_free_hdr *hdr3 = bp->b_addr; + + if (hdr3->pad != cpu_to_be32(0)) + xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk); + } + + /* Check all the entries. */ + xfs_dir2_free_hdr_from_disk(sc->ip->i_mount, &freehdr, bp->b_addr); + for (i = 0; i < freehdr.nvalid; i++) { + best = be16_to_cpu(freehdr.bests[i]); + if (best == NULLDATAOFF) { + stale++; + continue; + } + error = xfs_dir3_data_read(sc->tp, sc->ip, + (freehdr.firstdb + i) * args->geo->fsbcount, + 0, &dbp); + if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, lblk, + &error)) + goto out; + xchk_directory_check_freesp(sc, lblk, dbp, best); + xfs_trans_brelse(sc->tp, dbp); + } + + if (freehdr.nused + stale != freehdr.nvalid) + xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk); +out: + xfs_trans_brelse(sc->tp, bp); + return error; +} + +/* Check free space information in directories. */ +STATIC int +xchk_directory_blocks( + struct xfs_scrub *sc) +{ + struct xfs_bmbt_irec got; + struct xfs_da_args args; + struct xfs_ifork *ifp = xfs_ifork_ptr(sc->ip, XFS_DATA_FORK); + struct xfs_mount *mp = sc->mp; + xfs_fileoff_t leaf_lblk; + xfs_fileoff_t free_lblk; + xfs_fileoff_t lblk; + struct xfs_iext_cursor icur; + xfs_dablk_t dabno; + xfs_dir2_db_t last_data_db = 0; + bool found; + bool is_block = false; + int error; + + /* Ignore local format directories. */ + if (ifp->if_format != XFS_DINODE_FMT_EXTENTS && + ifp->if_format != XFS_DINODE_FMT_BTREE) + return 0; + + lblk = XFS_B_TO_FSB(mp, XFS_DIR2_DATA_OFFSET); + leaf_lblk = XFS_B_TO_FSB(mp, XFS_DIR2_LEAF_OFFSET); + free_lblk = XFS_B_TO_FSB(mp, XFS_DIR2_FREE_OFFSET); + + /* Is this a block dir? */ + args.dp = sc->ip; + args.geo = mp->m_dir_geo; + args.trans = sc->tp; + error = xfs_dir2_isblock(&args, &is_block); + if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, lblk, &error)) + goto out; + + /* Iterate all the data extents in the directory... */ + found = xfs_iext_lookup_extent(sc->ip, ifp, lblk, &icur, &got); + while (found && !(sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)) { + /* No more data blocks... */ + if (got.br_startoff >= leaf_lblk) + break; + + /* + * Check each data block's bestfree data. + * + * Iterate all the fsbcount-aligned block offsets in + * this directory. 
The directory block reading code is + * smart enough to do its own bmap lookups to handle + * discontiguous directory blocks. When we're done + * with the extent record, re-query the bmap at the + * next fsbcount-aligned offset to avoid redundant + * block checks. + */ + for (lblk = roundup((xfs_dablk_t)got.br_startoff, + args.geo->fsbcount); + lblk < got.br_startoff + got.br_blockcount; + lblk += args.geo->fsbcount) { + last_data_db = xfs_dir2_da_to_db(args.geo, lblk); + error = xchk_directory_data_bestfree(sc, lblk, + is_block); + if (error) + goto out; + } + dabno = got.br_startoff + got.br_blockcount; + lblk = roundup(dabno, args.geo->fsbcount); + found = xfs_iext_lookup_extent(sc->ip, ifp, lblk, &icur, &got); + } + + if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) + goto out; + + /* Look for a leaf1 block, which has free info. */ + if (xfs_iext_lookup_extent(sc->ip, ifp, leaf_lblk, &icur, &got) && + got.br_startoff == leaf_lblk && + got.br_blockcount == args.geo->fsbcount && + !xfs_iext_next_extent(ifp, &icur, &got)) { + if (is_block) { + xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk); + goto out; + } + error = xchk_directory_leaf1_bestfree(sc, &args, last_data_db, + leaf_lblk); + if (error) + goto out; + } + + if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) + goto out; + + /* Scan for free blocks */ + lblk = free_lblk; + found = xfs_iext_lookup_extent(sc->ip, ifp, lblk, &icur, &got); + while (found && !(sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)) { + /* + * Dirs can't have blocks mapped above 2^32. + * Single-block dirs shouldn't even be here. + */ + lblk = got.br_startoff; + if (lblk & ~0xFFFFFFFFULL) { + xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk); + goto out; + } + if (is_block) { + xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk); + goto out; + } + + /* + * Check each dir free block's bestfree data. + * + * Iterate all the fsbcount-aligned block offsets in + * this directory. The directory block reading code is + * smart enough to do its own bmap lookups to handle + * discontiguous directory blocks. When we're done + * with the extent record, re-query the bmap at the + * next fsbcount-aligned offset to avoid redundant + * block checks. + */ + for (lblk = roundup((xfs_dablk_t)got.br_startoff, + args.geo->fsbcount); + lblk < got.br_startoff + got.br_blockcount; + lblk += args.geo->fsbcount) { + error = xchk_directory_free_bestfree(sc, &args, + lblk); + if (error) + goto out; + } + dabno = got.br_startoff + got.br_blockcount; + lblk = roundup(dabno, args.geo->fsbcount); + found = xfs_iext_lookup_extent(sc->ip, ifp, lblk, &icur, &got); + } +out: + return error; +} + +/* Scrub a whole directory. */ +int +xchk_directory( + struct xfs_scrub *sc) +{ + struct xchk_dir_ctx sdc = { + .dir_iter.actor = xchk_dir_actor, + .dir_iter.pos = 0, + .sc = sc, + }; + size_t bufsize; + loff_t oldpos; + int error = 0; + + if (!S_ISDIR(VFS_I(sc->ip)->i_mode)) + return -ENOENT; + + /* Plausible size? */ + if (sc->ip->i_disk_size < xfs_dir2_sf_hdr_size(0)) { + xchk_ino_set_corrupt(sc, sc->ip->i_ino); + goto out; + } + + /* Check directory tree structure */ + error = xchk_da_btree(sc, XFS_DATA_FORK, xchk_dir_rec, NULL); + if (error) + return error; + + if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) + return error; + + /* Check the freespace. */ + error = xchk_directory_blocks(sc); + if (error) + return error; + + if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) + return error; + + /* + * Check that every dirent we see can also be looked up by hash. 
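+ * In other words, a dirent ("foo" -> ino 128, say) is only sane if + * asking xfs_dir_lookup for "foo" returns inode 128.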
+ * Userspace usually asks for a 32k buffer, so we will too. + */ + bufsize = (size_t)min_t(loff_t, XFS_READDIR_BUFSIZE, + sc->ip->i_disk_size); + + /* + * Look up every name in this directory by hash. + * + * Use the xfs_readdir function to call xchk_dir_actor on + * every directory entry in this directory. In _actor, we check + * the name, inode number, and ftype (if applicable) of the + * entry. xfs_readdir uses the VFS filldir functions to provide + * iteration context. + * + * The VFS grabs a read or write lock via i_rwsem before it reads + * or writes to a directory. If we've gotten this far we've + * already obtained IOLOCK_EXCL, which (since 4.10) is the same as + * getting a write lock on i_rwsem. Therefore, it is safe for us + * to drop the ILOCK here in order to reuse the _readdir and + * _dir_lookup routines, which do their own ILOCK locking. + */ + oldpos = 0; + sc->ilock_flags &= ~XFS_ILOCK_EXCL; + xfs_iunlock(sc->ip, XFS_ILOCK_EXCL); + while (true) { + error = xfs_readdir(sc->tp, sc->ip, &sdc.dir_iter, bufsize); + if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, 0, + &error)) + goto out; + if (oldpos == sdc.dir_iter.pos) + break; + oldpos = sdc.dir_iter.pos; + } + +out: + return error; +} diff --git a/fs/xfs/scrub/fscounters.c b/fs/xfs/scrub/fscounters.c new file mode 100644 index 000000000..88d6961e3 --- /dev/null +++ b/fs/xfs/scrub/fscounters.c @@ -0,0 +1,381 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2019 Oracle. All Rights Reserved. + * Author: Darrick J. Wong + */ +#include "xfs.h" +#include "xfs_fs.h" +#include "xfs_shared.h" +#include "xfs_format.h" +#include "xfs_trans_resv.h" +#include "xfs_mount.h" +#include "xfs_alloc.h" +#include "xfs_ialloc.h" +#include "xfs_health.h" +#include "xfs_btree.h" +#include "xfs_ag.h" +#include "scrub/scrub.h" +#include "scrub/common.h" +#include "scrub/trace.h" + +/* + * FS Summary Counters + * =================== + * + * The basics of filesystem summary counter checking are that we iterate the + * AGs counting the number of free blocks, free space btree blocks, per-AG + * reservations, inodes, delayed allocation reservations, and free inodes. + * Then we compare what we computed against the in-core counters. + * + * However, the reality is that summary counters are a tricky beast to check. + * While we /could/ freeze the filesystem and scramble around the AGs counting + * the free blocks, in practice we prefer not to do that for a scan because + * freezing is costly. To get around this, we added a per-cpu counter of the + * delalloc reservations so that we can rotor around the AGs relatively + * quickly, and we allow the counts to be slightly off because we're not taking + * any locks while we do this. + * + * So the first thing we do is warm up the buffer cache in the setup routine by + * walking all the AGs to make sure the incore per-AG structure has been + * initialized. The expected value calculation then iterates the incore per-AG + * structures as quickly as it can. We snapshot the percpu counters before and + * after this operation and use the difference in counter values to guess at + * our tolerance for mismatch between expected and actual counter values. + */ + +/* + * Since the expected value computation is lockless but only browses incore + * values, the percpu counters should be fairly close to each other. However, + * we'll allow ourselves to be off by at least this (arbitrary) amount.
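+ * For example, if two sums of the same percpu counter taken moments + * apart differ by a few hundred blocks, we treat a mismatch of that + * size as scan noise rather than corruption.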
+ */ +#define XCHK_FSCOUNT_MIN_VARIANCE (512) + +/* + * Make sure the per-AG structure has been initialized from the on-disk header + * contents and trust that the incore counters match the ondisk counters. (The + * AGF and AGI scrubbers check them, and a normal xfs_scrub run checks the + * summary counters after checking all AG headers). Do this from the setup + * function so that the inner AG aggregation loop runs as quickly as possible. + * + * This function runs during the setup phase /before/ we start checking any + * metadata. + */ +STATIC int +xchk_fscount_warmup( + struct xfs_scrub *sc) +{ + struct xfs_mount *mp = sc->mp; + struct xfs_buf *agi_bp = NULL; + struct xfs_buf *agf_bp = NULL; + struct xfs_perag *pag = NULL; + xfs_agnumber_t agno; + int error = 0; + + for_each_perag(mp, agno, pag) { + if (xchk_should_terminate(sc, &error)) + break; + if (pag->pagi_init && pag->pagf_init) + continue; + + /* Lock both AG headers. */ + error = xfs_ialloc_read_agi(pag, sc->tp, &agi_bp); + if (error) + break; + error = xfs_alloc_read_agf(pag, sc->tp, 0, &agf_bp); + if (error) + break; + + /* + * These are supposed to be initialized by the header read + * function. + */ + if (!pag->pagi_init || !pag->pagf_init) { + error = -EFSCORRUPTED; + break; + } + + xfs_buf_relse(agf_bp); + agf_bp = NULL; + xfs_buf_relse(agi_bp); + agi_bp = NULL; + } + + if (agf_bp) + xfs_buf_relse(agf_bp); + if (agi_bp) + xfs_buf_relse(agi_bp); + if (pag) + xfs_perag_put(pag); + return error; +} + +int +xchk_setup_fscounters( + struct xfs_scrub *sc) +{ + struct xchk_fscounters *fsc; + int error; + + sc->buf = kmem_zalloc(sizeof(struct xchk_fscounters), 0); + if (!sc->buf) + return -ENOMEM; + fsc = sc->buf; + + xfs_icount_range(sc->mp, &fsc->icount_min, &fsc->icount_max); + + /* We must get the incore counters set up before we can proceed. */ + error = xchk_fscount_warmup(sc); + if (error) + return error; + + return xchk_trans_alloc(sc, 0); +} + +/* Count free space btree blocks manually for pre-lazysbcount filesystems. */ +static int +xchk_fscount_btreeblks( + struct xfs_scrub *sc, + struct xchk_fscounters *fsc, + xfs_agnumber_t agno) +{ + xfs_extlen_t blocks; + int error; + + error = xchk_ag_init_existing(sc, agno, &sc->sa); + if (error) + goto out_free; + + error = xfs_btree_count_blocks(sc->sa.bno_cur, &blocks); + if (error) + goto out_free; + fsc->fdblocks += blocks - 1; + + error = xfs_btree_count_blocks(sc->sa.cnt_cur, &blocks); + if (error) + goto out_free; + fsc->fdblocks += blocks - 1; + +out_free: + xchk_ag_free(sc, &sc->sa); + return error; +} + +/* + * Calculate what the global in-core counters ought to be from the incore + * per-AG structure. Callers can compare this to the actual in-core counters + * to estimate by how much both in-core and on-disk counters need to be + * adjusted. + */ +STATIC int +xchk_fscount_aggregate_agcounts( + struct xfs_scrub *sc, + struct xchk_fscounters *fsc) +{ + struct xfs_mount *mp = sc->mp; + struct xfs_perag *pag; + uint64_t delayed; + xfs_agnumber_t agno; + int tries = 8; + int error = 0; + +retry: + fsc->icount = 0; + fsc->ifree = 0; + fsc->fdblocks = 0; + + for_each_perag(mp, agno, pag) { + if (xchk_should_terminate(sc, &error)) + break; + + /* This somehow got unset since the warmup? 
*/ + if (!pag->pagi_init || !pag->pagf_init) { + error = -EFSCORRUPTED; + break; + } + + /* Count all the inodes */ + fsc->icount += pag->pagi_count; + fsc->ifree += pag->pagi_freecount; + + /* Add up the free/freelist/bnobt/cntbt blocks */ + fsc->fdblocks += pag->pagf_freeblks; + fsc->fdblocks += pag->pagf_flcount; + if (xfs_has_lazysbcount(sc->mp)) { + fsc->fdblocks += pag->pagf_btreeblks; + } else { + error = xchk_fscount_btreeblks(sc, fsc, agno); + if (error) + break; + } + + /* + * Per-AG reservations are taken out of the incore counters, + * so they must be left out of the free blocks computation. + */ + fsc->fdblocks -= pag->pag_meta_resv.ar_reserved; + fsc->fdblocks -= pag->pag_rmapbt_resv.ar_orig_reserved; + + } + if (pag) + xfs_perag_put(pag); + if (error) + return error; + + /* + * The global incore space reservation is taken from the incore + * counters, so leave that out of the computation. + */ + fsc->fdblocks -= mp->m_resblks_avail; + + /* + * Delayed allocation reservations are taken out of the incore counters + * but not recorded on disk, so leave them and their indlen blocks out + * of the computation. + */ + delayed = percpu_counter_sum(&mp->m_delalloc_blks); + fsc->fdblocks -= delayed; + + trace_xchk_fscounters_calc(mp, fsc->icount, fsc->ifree, fsc->fdblocks, + delayed); + + + /* Bail out if the values we compute are totally nonsense. */ + if (fsc->icount < fsc->icount_min || fsc->icount > fsc->icount_max || + fsc->fdblocks > mp->m_sb.sb_dblocks || + fsc->ifree > fsc->icount_max) + return -EFSCORRUPTED; + + /* + * If ifree > icount then we probably had some perturbation in the + * counters while we were calculating things. We'll try a few times + * to maintain ifree <= icount before giving up. + */ + if (fsc->ifree > fsc->icount) { + if (tries--) + goto retry; + xchk_set_incomplete(sc); + return 0; + } + + return 0; +} + +/* + * Is the @counter reasonably close to the @expected value? + * + * We neither locked nor froze anything in the filesystem while aggregating the + * per-AG data to compute the @expected value, which means that the counter + * could have changed. We know the @old_value of the summation of the counter + * before the aggregation, and we re-sum the counter now. If the expected + * value falls between the two summations, we're ok. + * + * Otherwise, we /might/ have a problem. If the change in the summations is + * more than we want to tolerate, the filesystem is probably busy and we should + * just send back INCOMPLETE and see if userspace will try again. + */ +static inline bool +xchk_fscount_within_range( + struct xfs_scrub *sc, + const int64_t old_value, + struct percpu_counter *counter, + uint64_t expected) +{ + int64_t min_value, max_value; + int64_t curr_value = percpu_counter_sum(counter); + + trace_xchk_fscounters_within_range(sc->mp, expected, curr_value, + old_value); + + /* Negative values are always wrong. */ + if (curr_value < 0) + return false; + + /* Exact matches are always ok. */ + if (curr_value == expected) + return true; + + min_value = min(old_value, curr_value); + max_value = max(old_value, curr_value); + + /* Within the before-and-after range is ok. */ + if (expected >= min_value && expected <= max_value) + return true; + + /* + * If the difference between the two summations is too large, the fs + * might just be busy and so we'll mark the scrub incomplete. Return + * true here so that we don't mark the counter corrupt. 
+ * + * XXX: In the future when userspace can grant scrub permission to + * quiesce the filesystem to solve the outsized variance problem, this + * check should be moved up and the return code changed to signal to + * userspace that we need quiesce permission. + */ + if (max_value - min_value >= XCHK_FSCOUNT_MIN_VARIANCE) { + xchk_set_incomplete(sc); + return true; + } + + return false; +} + +/* Check the superblock counters. */ +int +xchk_fscounters( + struct xfs_scrub *sc) +{ + struct xfs_mount *mp = sc->mp; + struct xchk_fscounters *fsc = sc->buf; + int64_t icount, ifree, fdblocks; + int error; + + /* Snapshot the percpu counters. */ + icount = percpu_counter_sum(&mp->m_icount); + ifree = percpu_counter_sum(&mp->m_ifree); + fdblocks = percpu_counter_sum(&mp->m_fdblocks); + + /* No negative values, please! */ + if (icount < 0 || ifree < 0 || fdblocks < 0) + xchk_set_corrupt(sc); + + /* See if icount is obviously wrong. */ + if (icount < fsc->icount_min || icount > fsc->icount_max) + xchk_set_corrupt(sc); + + /* See if fdblocks is obviously wrong. */ + if (fdblocks > mp->m_sb.sb_dblocks) + xchk_set_corrupt(sc); + + /* + * XXX: We can't quiesce percpu counter updates, so exit early. + * This can be re-enabled when we gain exclusive freeze functionality. + */ + return 0; + + /* + * If ifree exceeds icount by more than the minimum variance then + * something's probably wrong with the counters. + */ + if (ifree > icount && ifree - icount > XCHK_FSCOUNT_MIN_VARIANCE) + xchk_set_corrupt(sc); + + /* Walk the incore AG headers to calculate the expected counters. */ + error = xchk_fscount_aggregate_agcounts(sc, fsc); + if (!xchk_process_error(sc, 0, XFS_SB_BLOCK(mp), &error)) + return error; + if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_INCOMPLETE) + return 0; + + /* Compare the in-core counters with whatever we counted. */ + if (!xchk_fscount_within_range(sc, icount, &mp->m_icount, fsc->icount)) + xchk_set_corrupt(sc); + + if (!xchk_fscount_within_range(sc, ifree, &mp->m_ifree, fsc->ifree)) + xchk_set_corrupt(sc); + + if (!xchk_fscount_within_range(sc, fdblocks, &mp->m_fdblocks, + fsc->fdblocks)) + xchk_set_corrupt(sc); + + return 0; +} diff --git a/fs/xfs/scrub/health.c b/fs/xfs/scrub/health.c new file mode 100644 index 000000000..aa65ec88a --- /dev/null +++ b/fs/xfs/scrub/health.c @@ -0,0 +1,233 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2019 Oracle. All Rights Reserved. + * Author: Darrick J. Wong + */ +#include "xfs.h" +#include "xfs_fs.h" +#include "xfs_shared.h" +#include "xfs_format.h" +#include "xfs_btree.h" +#include "xfs_trans_resv.h" +#include "xfs_mount.h" +#include "xfs_ag.h" +#include "xfs_health.h" +#include "scrub/scrub.h" +#include "scrub/health.h" + +/* + * Scrub and In-Core Filesystem Health Assessments + * =============================================== + * + * Online scrub and repair have the time and the ability to perform stronger + * checks than we can do from the metadata verifiers, because they can + * cross-reference records between data structures. Therefore, scrub is in a + * good position to update the online filesystem health assessments to reflect + * the good/bad state of the data structure. + * + * We therefore extend scrub in the following ways to achieve this: + * + * 1. Create a "sick_mask" field in the scrub context. When we're setting up a + * scrub call, set this to the default XFS_SICK_* flag(s) for the selected + * scrub type (call it A). Scrub and repair functions can override the default + * sick_mask value if they choose. + * + * 2. 
If the scrubber returns a runtime error code, we exit making no changes + * to the incore sick state. + * + * 3. If the scrubber finds that A is clean, use sick_mask to clear the incore + * sick flags before exiting. + * + * 4. If the scrubber finds that A is corrupt, use sick_mask to set the incore + * sick flags. If the user didn't want to repair then we exit, leaving the + * metadata structure unfixed and the sick flag set. + * + * 5. Now we know that A is corrupt and the user wants to repair, so run the + * repairer. If the repairer returns an error code, we exit with that error + * code, having made no further changes to the incore sick state. + * + * 6. If repair rebuilds A correctly and the subsequent re-scrub of A is clean, + * use sick_mask to clear the incore sick flags. This should have the effect + * that A is no longer marked sick. + * + * 7. If repair rebuilds A incorrectly, the re-scrub will find it corrupt and + * use sick_mask to set the incore sick flags. This should have no externally + * visible effect since we already set them in step (4). + * + * There are some complications to this story, however. For certain types of + * complementary metadata indices (e.g. inobt/finobt), it is easier to rebuild + * both structures at the same time. The following principles apply to this + * type of repair strategy: + * + * 8. Any repair function that rebuilds multiple structures should update + * sick_mask_visible to reflect whatever other structures are rebuilt, and + * verify that all the rebuilt structures can pass a scrub check. The outcomes + * of 5-7 still apply, but with a sick_mask that covers everything being + * rebuilt. + */ + +/* Map our scrub type to a sick mask and a set of health update functions. */ + +enum xchk_health_group { + XHG_FS = 1, + XHG_RT, + XHG_AG, + XHG_INO, +}; + +struct xchk_health_map { + enum xchk_health_group group; + unsigned int sick_mask; +}; + +static const struct xchk_health_map type_to_health_flag[XFS_SCRUB_TYPE_NR] = { + [XFS_SCRUB_TYPE_SB] = { XHG_AG, XFS_SICK_AG_SB }, + [XFS_SCRUB_TYPE_AGF] = { XHG_AG, XFS_SICK_AG_AGF }, + [XFS_SCRUB_TYPE_AGFL] = { XHG_AG, XFS_SICK_AG_AGFL }, + [XFS_SCRUB_TYPE_AGI] = { XHG_AG, XFS_SICK_AG_AGI }, + [XFS_SCRUB_TYPE_BNOBT] = { XHG_AG, XFS_SICK_AG_BNOBT }, + [XFS_SCRUB_TYPE_CNTBT] = { XHG_AG, XFS_SICK_AG_CNTBT }, + [XFS_SCRUB_TYPE_INOBT] = { XHG_AG, XFS_SICK_AG_INOBT }, + [XFS_SCRUB_TYPE_FINOBT] = { XHG_AG, XFS_SICK_AG_FINOBT }, + [XFS_SCRUB_TYPE_RMAPBT] = { XHG_AG, XFS_SICK_AG_RMAPBT }, + [XFS_SCRUB_TYPE_REFCNTBT] = { XHG_AG, XFS_SICK_AG_REFCNTBT }, + [XFS_SCRUB_TYPE_INODE] = { XHG_INO, XFS_SICK_INO_CORE }, + [XFS_SCRUB_TYPE_BMBTD] = { XHG_INO, XFS_SICK_INO_BMBTD }, + [XFS_SCRUB_TYPE_BMBTA] = { XHG_INO, XFS_SICK_INO_BMBTA }, + [XFS_SCRUB_TYPE_BMBTC] = { XHG_INO, XFS_SICK_INO_BMBTC }, + [XFS_SCRUB_TYPE_DIR] = { XHG_INO, XFS_SICK_INO_DIR }, + [XFS_SCRUB_TYPE_XATTR] = { XHG_INO, XFS_SICK_INO_XATTR }, + [XFS_SCRUB_TYPE_SYMLINK] = { XHG_INO, XFS_SICK_INO_SYMLINK }, + [XFS_SCRUB_TYPE_PARENT] = { XHG_INO, XFS_SICK_INO_PARENT }, + [XFS_SCRUB_TYPE_RTBITMAP] = { XHG_RT, XFS_SICK_RT_BITMAP }, + [XFS_SCRUB_TYPE_RTSUM] = { XHG_RT, XFS_SICK_RT_SUMMARY }, + [XFS_SCRUB_TYPE_UQUOTA] = { XHG_FS, XFS_SICK_FS_UQUOTA }, + [XFS_SCRUB_TYPE_GQUOTA] = { XHG_FS, XFS_SICK_FS_GQUOTA }, + [XFS_SCRUB_TYPE_PQUOTA] = { XHG_FS, XFS_SICK_FS_PQUOTA }, + [XFS_SCRUB_TYPE_FSCOUNTERS] = { XHG_FS, XFS_SICK_FS_COUNTERS }, +}; + +/* Return the health status mask for this scrub type. 
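+ * For example, XFS_SCRUB_TYPE_BNOBT maps to XFS_SICK_AG_BNOBT in the + * table above.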
*/ +unsigned int +xchk_health_mask_for_scrub_type( + __u32 scrub_type) +{ + return type_to_health_flag[scrub_type].sick_mask; +} + +/* + * Update filesystem health assessments based on what we found and did. + * + * If the scrubber finds errors, we mark sick whatever's mentioned in + * sick_mask, no matter whether this is a first scan or an + * evaluation of repair effectiveness. + * + * Otherwise, no direct corruption was found, so mark whatever's in + * sick_mask as healthy. + */ +void +xchk_update_health( + struct xfs_scrub *sc) +{ + struct xfs_perag *pag; + bool bad; + + if (!sc->sick_mask) + return; + + bad = (sc->sm->sm_flags & (XFS_SCRUB_OFLAG_CORRUPT | + XFS_SCRUB_OFLAG_XCORRUPT)); + switch (type_to_health_flag[sc->sm->sm_type].group) { + case XHG_AG: + pag = xfs_perag_get(sc->mp, sc->sm->sm_agno); + if (bad) + xfs_ag_mark_sick(pag, sc->sick_mask); + else + xfs_ag_mark_healthy(pag, sc->sick_mask); + xfs_perag_put(pag); + break; + case XHG_INO: + if (!sc->ip) + return; + if (bad) + xfs_inode_mark_sick(sc->ip, sc->sick_mask); + else + xfs_inode_mark_healthy(sc->ip, sc->sick_mask); + break; + case XHG_FS: + if (bad) + xfs_fs_mark_sick(sc->mp, sc->sick_mask); + else + xfs_fs_mark_healthy(sc->mp, sc->sick_mask); + break; + case XHG_RT: + if (bad) + xfs_rt_mark_sick(sc->mp, sc->sick_mask); + else + xfs_rt_mark_healthy(sc->mp, sc->sick_mask); + break; + default: + ASSERT(0); + break; + } +} + +/* Is the given per-AG btree healthy enough for scanning? */ +bool +xchk_ag_btree_healthy_enough( + struct xfs_scrub *sc, + struct xfs_perag *pag, + xfs_btnum_t btnum) +{ + unsigned int mask = 0; + + /* + * We always want the cursor if it's the same type as whatever we're + * scrubbing, even if we already know the structure is corrupt. + * + * Otherwise, we're only interested in the btree for cross-referencing. + * If we know the btree is bad then don't bother, just set XFAIL. + */ + switch (btnum) { + case XFS_BTNUM_BNO: + if (sc->sm->sm_type == XFS_SCRUB_TYPE_BNOBT) + return true; + mask = XFS_SICK_AG_BNOBT; + break; + case XFS_BTNUM_CNT: + if (sc->sm->sm_type == XFS_SCRUB_TYPE_CNTBT) + return true; + mask = XFS_SICK_AG_CNTBT; + break; + case XFS_BTNUM_INO: + if (sc->sm->sm_type == XFS_SCRUB_TYPE_INOBT) + return true; + mask = XFS_SICK_AG_INOBT; + break; + case XFS_BTNUM_FINO: + if (sc->sm->sm_type == XFS_SCRUB_TYPE_FINOBT) + return true; + mask = XFS_SICK_AG_FINOBT; + break; + case XFS_BTNUM_RMAP: + if (sc->sm->sm_type == XFS_SCRUB_TYPE_RMAPBT) + return true; + mask = XFS_SICK_AG_RMAPBT; + break; + case XFS_BTNUM_REFC: + if (sc->sm->sm_type == XFS_SCRUB_TYPE_REFCNTBT) + return true; + mask = XFS_SICK_AG_REFCNTBT; + break; + default: + ASSERT(0); + return true; + } + + if (xfs_ag_has_sickness(pag, mask)) { + sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XFAIL; + return false; + } + + return true; +} diff --git a/fs/xfs/scrub/health.h b/fs/xfs/scrub/health.h new file mode 100644 index 000000000..d0b938d3d --- /dev/null +++ b/fs/xfs/scrub/health.h @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2019 Oracle. All Rights Reserved. + * Author: Darrick J. 
Wong + */ +#ifndef __XFS_SCRUB_HEALTH_H__ +#define __XFS_SCRUB_HEALTH_H__ + +unsigned int xchk_health_mask_for_scrub_type(__u32 scrub_type); +void xchk_update_health(struct xfs_scrub *sc); +bool xchk_ag_btree_healthy_enough(struct xfs_scrub *sc, struct xfs_perag *pag, + xfs_btnum_t btnum); + +#endif /* __XFS_SCRUB_HEALTH_H__ */ diff --git a/fs/xfs/scrub/ialloc.c b/fs/xfs/scrub/ialloc.c new file mode 100644 index 000000000..e312be7cd --- /dev/null +++ b/fs/xfs/scrub/ialloc.c @@ -0,0 +1,659 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2017 Oracle. All Rights Reserved. + * Author: Darrick J. Wong + */ +#include "xfs.h" +#include "xfs_fs.h" +#include "xfs_shared.h" +#include "xfs_format.h" +#include "xfs_trans_resv.h" +#include "xfs_mount.h" +#include "xfs_btree.h" +#include "xfs_log_format.h" +#include "xfs_trans.h" +#include "xfs_inode.h" +#include "xfs_ialloc.h" +#include "xfs_ialloc_btree.h" +#include "xfs_icache.h" +#include "xfs_rmap.h" +#include "scrub/scrub.h" +#include "scrub/common.h" +#include "scrub/btree.h" +#include "scrub/trace.h" +#include "xfs_ag.h" + +/* + * Set us up to scrub inode btrees. + * If we detect a discrepancy between the inobt and the inode, + * try again after forcing logged inode cores out to disk. + */ +int +xchk_setup_ag_iallocbt( + struct xfs_scrub *sc) +{ + return xchk_setup_ag_btree(sc, sc->flags & XCHK_TRY_HARDER); +} + +/* Inode btree scrubber. */ + +struct xchk_iallocbt { + /* Number of inodes we see while scanning inobt. */ + unsigned long long inodes; + + /* Expected next startino, for big block filesystems. */ + xfs_agino_t next_startino; + + /* Expected end of the current inode cluster. */ + xfs_agino_t next_cluster_ino; +}; + +/* + * If we're checking the finobt, cross-reference with the inobt. + * Otherwise we're checking the inobt; if there is an finobt, make sure + * we have a record or not depending on freecount. + */ +static inline void +xchk_iallocbt_chunk_xref_other( + struct xfs_scrub *sc, + struct xfs_inobt_rec_incore *irec, + xfs_agino_t agino) +{ + struct xfs_btree_cur **pcur; + bool has_irec; + int error; + + if (sc->sm->sm_type == XFS_SCRUB_TYPE_FINOBT) + pcur = &sc->sa.ino_cur; + else + pcur = &sc->sa.fino_cur; + if (!(*pcur)) + return; + error = xfs_ialloc_has_inode_record(*pcur, agino, agino, &has_irec); + if (!xchk_should_check_xref(sc, &error, pcur)) + return; + if (((irec->ir_freecount > 0 && !has_irec) || + (irec->ir_freecount == 0 && has_irec))) + xchk_btree_xref_set_corrupt(sc, *pcur, 0); +} + +/* Cross-reference with the other btrees. */ +STATIC void +xchk_iallocbt_chunk_xref( + struct xfs_scrub *sc, + struct xfs_inobt_rec_incore *irec, + xfs_agino_t agino, + xfs_agblock_t agbno, + xfs_extlen_t len) +{ + if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) + return; + + xchk_xref_is_used_space(sc, agbno, len); + xchk_iallocbt_chunk_xref_other(sc, irec, agino); + xchk_xref_is_owned_by(sc, agbno, len, &XFS_RMAP_OINFO_INODES); + xchk_xref_is_not_shared(sc, agbno, len); +} + +/* Is this chunk worth checking? */ +STATIC bool +xchk_iallocbt_chunk( + struct xchk_btree *bs, + struct xfs_inobt_rec_incore *irec, + xfs_agino_t agino, + xfs_extlen_t len) +{ + struct xfs_mount *mp = bs->cur->bc_mp; + struct xfs_perag *pag = bs->cur->bc_ag.pag; + xfs_agblock_t bno; + + bno = XFS_AGINO_TO_AGBNO(mp, agino); + + if (!xfs_verify_agbext(pag, bno, len)) + xchk_btree_set_corrupt(bs->sc, bs->cur, 0); + + xchk_iallocbt_chunk_xref(bs->sc, irec, agino, bno, len); + + return true; +} + +/* Count the number of free inodes. 
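+ * (ir_free is a 64-bit bitmap with one bit per inode in the chunk, so + * the free count is simply the population count; e.g. a mask of 0xFF + * means the first eight inodes of the chunk are free.)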
*/ +static unsigned int +xchk_iallocbt_freecount( + xfs_inofree_t freemask) +{ + BUILD_BUG_ON(sizeof(freemask) != sizeof(__u64)); + return hweight64(freemask); +} + +/* + * Check that an inode's allocation status matches ir_free in the inobt + * record. First we try querying the in-core inode state, and if the inode + * isn't loaded we examine the on-disk inode directly. + * + * Since there can be 1:M and M:1 mappings between inobt records and inode + * clusters, we pass in the inode location information as an inobt record; + * the index of an inode cluster within the inobt record (as well as the + * cluster buffer itself); and the index of the inode within the cluster. + * + * @irec is the inobt record. + * @irec_ino is the inode offset from the start of the record. + * @dip is the on-disk inode. + */ +STATIC int +xchk_iallocbt_check_cluster_ifree( + struct xchk_btree *bs, + struct xfs_inobt_rec_incore *irec, + unsigned int irec_ino, + struct xfs_dinode *dip) +{ + struct xfs_mount *mp = bs->cur->bc_mp; + xfs_ino_t fsino; + xfs_agino_t agino; + bool irec_free; + bool ino_inuse; + bool freemask_ok; + int error = 0; + + if (xchk_should_terminate(bs->sc, &error)) + return error; + + /* + * Given an inobt record and the offset of an inode from the start of + * the record, compute which fs inode we're talking about. + */ + agino = irec->ir_startino + irec_ino; + fsino = XFS_AGINO_TO_INO(mp, bs->cur->bc_ag.pag->pag_agno, agino); + irec_free = (irec->ir_free & XFS_INOBT_MASK(irec_ino)); + + if (be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC || + (dip->di_version >= 3 && be64_to_cpu(dip->di_ino) != fsino)) { + xchk_btree_set_corrupt(bs->sc, bs->cur, 0); + goto out; + } + + error = xfs_icache_inode_is_allocated(mp, bs->cur->bc_tp, fsino, + &ino_inuse); + if (error == -ENODATA) { + /* Not cached, just read the disk buffer */ + freemask_ok = irec_free ^ !!(dip->di_mode); + if (!(bs->sc->flags & XCHK_TRY_HARDER) && !freemask_ok) + return -EDEADLOCK; + } else if (error < 0) { + /* + * Inode is only half assembled, or there was an IO error, + * or the verifier failed, so don't bother trying to check. + * The inode scrubber can deal with this. + */ + goto out; + } else { + /* Inode is all there. */ + freemask_ok = irec_free ^ ino_inuse; + } + if (!freemask_ok) + xchk_btree_set_corrupt(bs->sc, bs->cur, 0); +out: + return 0; +} + +/* + * Check that the holemask and freemask of a hypothetical inode cluster match + * what's actually on disk. If sparse inodes are enabled, the cluster does + * not actually have to map to inodes if the corresponding holemask bit is set. + * + * @cluster_base is the first inode in the cluster within the @irec. + */ +STATIC int +xchk_iallocbt_check_cluster( + struct xchk_btree *bs, + struct xfs_inobt_rec_incore *irec, + unsigned int cluster_base) +{ + struct xfs_imap imap; + struct xfs_mount *mp = bs->cur->bc_mp; + struct xfs_buf *cluster_bp; + unsigned int nr_inodes; + xfs_agnumber_t agno = bs->cur->bc_ag.pag->pag_agno; + xfs_agblock_t agbno; + unsigned int cluster_index; + uint16_t cluster_mask = 0; + uint16_t ir_holemask; + int error = 0; + + nr_inodes = min_t(unsigned int, XFS_INODES_PER_CHUNK, + M_IGEO(mp)->inodes_per_cluster); + + /* Map this inode cluster */ + agbno = XFS_AGINO_TO_AGBNO(mp, irec->ir_startino + cluster_base); + + /* Compute a bitmask for this cluster that can be used for holemask. 
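+ * Each holemask bit covers XFS_INODES_PER_HOLEMASK_BIT (i.e. four) + * inodes, so a 32-inode cluster at cluster_base 0, for example, sets + * bits 0-7 of the mask.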
*/ + for (cluster_index = 0; + cluster_index < nr_inodes; + cluster_index += XFS_INODES_PER_HOLEMASK_BIT) + cluster_mask |= XFS_INOBT_MASK((cluster_base + cluster_index) / + XFS_INODES_PER_HOLEMASK_BIT); + + /* + * Map the first inode of this cluster to a buffer and offset. + * Be careful about inobt records that don't align with the start of + * the inode buffer when block sizes are large enough to hold multiple + * inode chunks. When this happens, cluster_base will be zero but + * ir_startino can be large enough to make im_boffset nonzero. + */ + ir_holemask = (irec->ir_holemask & cluster_mask); + imap.im_blkno = XFS_AGB_TO_DADDR(mp, agno, agbno); + imap.im_len = XFS_FSB_TO_BB(mp, M_IGEO(mp)->blocks_per_cluster); + imap.im_boffset = XFS_INO_TO_OFFSET(mp, irec->ir_startino) << + mp->m_sb.sb_inodelog; + + if (imap.im_boffset != 0 && cluster_base != 0) { + ASSERT(imap.im_boffset == 0 || cluster_base == 0); + xchk_btree_set_corrupt(bs->sc, bs->cur, 0); + return 0; + } + + trace_xchk_iallocbt_check_cluster(mp, agno, irec->ir_startino, + imap.im_blkno, imap.im_len, cluster_base, nr_inodes, + cluster_mask, ir_holemask, + XFS_INO_TO_OFFSET(mp, irec->ir_startino + + cluster_base)); + + /* The whole cluster must be a hole or not a hole. */ + if (ir_holemask != cluster_mask && ir_holemask != 0) { + xchk_btree_set_corrupt(bs->sc, bs->cur, 0); + return 0; + } + + /* If any part of this is a hole, skip it. */ + if (ir_holemask) { + xchk_xref_is_not_owned_by(bs->sc, agbno, + M_IGEO(mp)->blocks_per_cluster, + &XFS_RMAP_OINFO_INODES); + return 0; + } + + xchk_xref_is_owned_by(bs->sc, agbno, M_IGEO(mp)->blocks_per_cluster, + &XFS_RMAP_OINFO_INODES); + + /* Grab the inode cluster buffer. */ + error = xfs_imap_to_bp(mp, bs->cur->bc_tp, &imap, &cluster_bp); + if (!xchk_btree_xref_process_error(bs->sc, bs->cur, 0, &error)) + return error; + + /* Check free status of each inode within this cluster. */ + for (cluster_index = 0; cluster_index < nr_inodes; cluster_index++) { + struct xfs_dinode *dip; + + if (imap.im_boffset >= BBTOB(cluster_bp->b_length)) { + xchk_btree_set_corrupt(bs->sc, bs->cur, 0); + break; + } + + dip = xfs_buf_offset(cluster_bp, imap.im_boffset); + error = xchk_iallocbt_check_cluster_ifree(bs, irec, + cluster_base + cluster_index, dip); + if (error) + break; + imap.im_boffset += mp->m_sb.sb_inodesize; + } + + xfs_trans_brelse(bs->cur->bc_tp, cluster_bp); + return error; +} + +/* + * For all the inode clusters that could map to this inobt record, make sure + * that the holemask makes sense and that the allocation status of each inode + * matches the freemask. + */ +STATIC int +xchk_iallocbt_check_clusters( + struct xchk_btree *bs, + struct xfs_inobt_rec_incore *irec) +{ + unsigned int cluster_base; + int error = 0; + + /* + * For the common case where this inobt record maps to multiple inode + * clusters this will call _check_cluster for each cluster. + * + * For the case that multiple inobt records map to a single cluster, + * this will call _check_cluster once. + */ + for (cluster_base = 0; + cluster_base < XFS_INODES_PER_CHUNK; + cluster_base += M_IGEO(bs->sc->mp)->inodes_per_cluster) { + error = xchk_iallocbt_check_cluster(bs, irec, cluster_base); + if (error) + break; + } + + return error; +} + +/* + * Make sure this inode btree record is aligned properly. Because a fs block + * contains multiple inodes, we check that the inobt record is aligned to the + * correct inode, not just the correct block on disk. This results in a finer + * grained corruption check. 
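+ * For example, with 512-byte inodes on 4k blocks there are eight + * inodes per block, so a record whose ir_startino is misaligned within + * a block is caught even though its block address looks fine.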
+ */ +STATIC void +xchk_iallocbt_rec_alignment( + struct xchk_btree *bs, + struct xfs_inobt_rec_incore *irec) +{ + struct xfs_mount *mp = bs->sc->mp; + struct xchk_iallocbt *iabt = bs->private; + struct xfs_ino_geometry *igeo = M_IGEO(mp); + + /* + * finobt records have different positioning requirements than inobt + * records: each finobt record must have a corresponding inobt record. + * That is checked in the xref function, so for now we only catch the + * obvious case where the record isn't at all aligned properly. + * + * Note that if a fs block contains more than a single chunk of inodes, + * we will have finobt records only for those chunks containing free + * inodes, and therefore expect chunk alignment of finobt records. + * Otherwise, we expect that the finobt record is aligned to the + * cluster alignment as told by the superblock. + */ + if (bs->cur->bc_btnum == XFS_BTNUM_FINO) { + unsigned int imask; + + imask = min_t(unsigned int, XFS_INODES_PER_CHUNK, + igeo->cluster_align_inodes) - 1; + if (irec->ir_startino & imask) + xchk_btree_set_corrupt(bs->sc, bs->cur, 0); + return; + } + + if (iabt->next_startino != NULLAGINO) { + /* + * We're midway through a cluster of inodes that is mapped by + * multiple inobt records. Did we get the record for the next + * irec in the sequence? + */ + if (irec->ir_startino != iabt->next_startino) { + xchk_btree_set_corrupt(bs->sc, bs->cur, 0); + return; + } + + iabt->next_startino += XFS_INODES_PER_CHUNK; + + /* Are we done with the cluster? */ + if (iabt->next_startino >= iabt->next_cluster_ino) { + iabt->next_startino = NULLAGINO; + iabt->next_cluster_ino = NULLAGINO; + } + return; + } + + /* inobt records must be aligned to cluster and inoalignment size. */ + if (irec->ir_startino & (igeo->cluster_align_inodes - 1)) { + xchk_btree_set_corrupt(bs->sc, bs->cur, 0); + return; + } + + if (irec->ir_startino & (igeo->inodes_per_cluster - 1)) { + xchk_btree_set_corrupt(bs->sc, bs->cur, 0); + return; + } + + if (igeo->inodes_per_cluster <= XFS_INODES_PER_CHUNK) + return; + + /* + * If this is the start of an inode cluster that can be mapped by + * multiple inobt records, the next inobt record must follow exactly + * after this one. + */ + iabt->next_startino = irec->ir_startino + XFS_INODES_PER_CHUNK; + iabt->next_cluster_ino = irec->ir_startino + igeo->inodes_per_cluster; +} + +/* Scrub an inobt/finobt record. */ +STATIC int +xchk_iallocbt_rec( + struct xchk_btree *bs, + const union xfs_btree_rec *rec) +{ + struct xfs_mount *mp = bs->cur->bc_mp; + struct xfs_perag *pag = bs->cur->bc_ag.pag; + struct xchk_iallocbt *iabt = bs->private; + struct xfs_inobt_rec_incore irec; + uint64_t holes; + xfs_agino_t agino; + xfs_extlen_t len; + int holecount; + int i; + int error = 0; + unsigned int real_freecount; + uint16_t holemask; + + xfs_inobt_btrec_to_irec(mp, rec, &irec); + + if (irec.ir_count > XFS_INODES_PER_CHUNK || + irec.ir_freecount > XFS_INODES_PER_CHUNK) + xchk_btree_set_corrupt(bs->sc, bs->cur, 0); + + real_freecount = irec.ir_freecount + + (XFS_INODES_PER_CHUNK - irec.ir_count); + if (real_freecount != xchk_iallocbt_freecount(irec.ir_free)) + xchk_btree_set_corrupt(bs->sc, bs->cur, 0); + + agino = irec.ir_startino; + /* Record has to be properly aligned within the AG.
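+ * (Both the first and the last inode of the 64-inode chunk must map + * to inodes that can actually exist in this AG.)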
*/ + if (!xfs_verify_agino(pag, agino) || + !xfs_verify_agino(pag, agino + XFS_INODES_PER_CHUNK - 1)) { + xchk_btree_set_corrupt(bs->sc, bs->cur, 0); + goto out; + } + + xchk_iallocbt_rec_alignment(bs, &irec); + if (bs->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) + goto out; + + iabt->inodes += irec.ir_count; + + /* Handle non-sparse inodes */ + if (!xfs_inobt_issparse(irec.ir_holemask)) { + len = XFS_B_TO_FSB(mp, + XFS_INODES_PER_CHUNK * mp->m_sb.sb_inodesize); + if (irec.ir_count != XFS_INODES_PER_CHUNK) + xchk_btree_set_corrupt(bs->sc, bs->cur, 0); + + if (!xchk_iallocbt_chunk(bs, &irec, agino, len)) + goto out; + goto check_clusters; + } + + /* Check each chunk of a sparse inode cluster. */ + holemask = irec.ir_holemask; + holecount = 0; + len = XFS_B_TO_FSB(mp, + XFS_INODES_PER_HOLEMASK_BIT * mp->m_sb.sb_inodesize); + holes = ~xfs_inobt_irec_to_allocmask(&irec); + if ((holes & irec.ir_free) != holes || + irec.ir_freecount > irec.ir_count) + xchk_btree_set_corrupt(bs->sc, bs->cur, 0); + + for (i = 0; i < XFS_INOBT_HOLEMASK_BITS; i++) { + if (holemask & 1) + holecount += XFS_INODES_PER_HOLEMASK_BIT; + else if (!xchk_iallocbt_chunk(bs, &irec, agino, len)) + break; + holemask >>= 1; + agino += XFS_INODES_PER_HOLEMASK_BIT; + } + + if (holecount > XFS_INODES_PER_CHUNK || + holecount + irec.ir_count != XFS_INODES_PER_CHUNK) + xchk_btree_set_corrupt(bs->sc, bs->cur, 0); + +check_clusters: + error = xchk_iallocbt_check_clusters(bs, &irec); + if (error) + goto out; + +out: + return error; +} + +/* + * Make sure the inode btrees are as large as the rmap thinks they are. + * Don't bother if we're missing btree cursors, as we're already corrupt. + */ +STATIC void +xchk_iallocbt_xref_rmap_btreeblks( + struct xfs_scrub *sc, + int which) +{ + xfs_filblks_t blocks; + xfs_extlen_t inobt_blocks = 0; + xfs_extlen_t finobt_blocks = 0; + int error; + + if (!sc->sa.ino_cur || !sc->sa.rmap_cur || + (xfs_has_finobt(sc->mp) && !sc->sa.fino_cur) || + xchk_skip_xref(sc->sm)) + return; + + /* Check that we saw as many inobt blocks as the rmap says. */ + error = xfs_btree_count_blocks(sc->sa.ino_cur, &inobt_blocks); + if (!xchk_process_error(sc, 0, 0, &error)) + return; + + if (sc->sa.fino_cur) { + error = xfs_btree_count_blocks(sc->sa.fino_cur, &finobt_blocks); + if (!xchk_process_error(sc, 0, 0, &error)) + return; + } + + error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, + &XFS_RMAP_OINFO_INOBT, &blocks); + if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur)) + return; + if (blocks != inobt_blocks + finobt_blocks) + xchk_btree_set_corrupt(sc, sc->sa.ino_cur, 0); +} + +/* + * Make sure that the inobt records point to the same number of blocks as + * the rmap says are owned by inodes. + */ +STATIC void +xchk_iallocbt_xref_rmap_inodes( + struct xfs_scrub *sc, + int which, + unsigned long long inodes) +{ + xfs_filblks_t blocks; + xfs_filblks_t inode_blocks; + int error; + + if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm)) + return; + + /* Check that we saw as many inode blocks as the rmap knows about. */ + error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, + &XFS_RMAP_OINFO_INODES, &blocks); + if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur)) + return; + inode_blocks = XFS_B_TO_FSB(sc->mp, inodes * sc->mp->m_sb.sb_inodesize); + if (blocks != inode_blocks) + xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0); +} + +/* Scrub the inode btrees for some AG. 
*/ +STATIC int +xchk_iallocbt( + struct xfs_scrub *sc, + xfs_btnum_t which) +{ + struct xfs_btree_cur *cur; + struct xchk_iallocbt iabt = { + .inodes = 0, + .next_startino = NULLAGINO, + .next_cluster_ino = NULLAGINO, + }; + int error; + + cur = which == XFS_BTNUM_INO ? sc->sa.ino_cur : sc->sa.fino_cur; + error = xchk_btree(sc, cur, xchk_iallocbt_rec, &XFS_RMAP_OINFO_INOBT, + &iabt); + if (error) + return error; + + xchk_iallocbt_xref_rmap_btreeblks(sc, which); + + /* + * If we're scrubbing the inode btree, inode_blocks is the number of + * blocks pointed to by all the inode chunk records. Therefore, we + * should compare to the number of inode chunk blocks that the rmap + * knows about. We can't do this for the finobt since it only points + * to inode chunks with free inodes. + */ + if (which == XFS_BTNUM_INO) + xchk_iallocbt_xref_rmap_inodes(sc, which, iabt.inodes); + + return error; +} + +int +xchk_inobt( + struct xfs_scrub *sc) +{ + return xchk_iallocbt(sc, XFS_BTNUM_INO); +} + +int +xchk_finobt( + struct xfs_scrub *sc) +{ + return xchk_iallocbt(sc, XFS_BTNUM_FINO); +} + +/* See if an inode btree has (or doesn't have) an inode chunk record. */ +static inline void +xchk_xref_inode_check( + struct xfs_scrub *sc, + xfs_agblock_t agbno, + xfs_extlen_t len, + struct xfs_btree_cur **icur, + bool should_have_inodes) +{ + bool has_inodes; + int error; + + if (!(*icur) || xchk_skip_xref(sc->sm)) + return; + + error = xfs_ialloc_has_inodes_at_extent(*icur, agbno, len, &has_inodes); + if (!xchk_should_check_xref(sc, &error, icur)) + return; + if (has_inodes != should_have_inodes) + xchk_btree_xref_set_corrupt(sc, *icur, 0); +} + +/* xref check that the extent is not covered by inodes */ +void +xchk_xref_is_not_inode_chunk( + struct xfs_scrub *sc, + xfs_agblock_t agbno, + xfs_extlen_t len) +{ + xchk_xref_inode_check(sc, agbno, len, &sc->sa.ino_cur, false); + xchk_xref_inode_check(sc, agbno, len, &sc->sa.fino_cur, false); +} + +/* xref check that the extent is covered by inodes */ +void +xchk_xref_is_inode_chunk( + struct xfs_scrub *sc, + xfs_agblock_t agbno, + xfs_extlen_t len) +{ + xchk_xref_inode_check(sc, agbno, len, &sc->sa.ino_cur, true); +} diff --git a/fs/xfs/scrub/inode.c b/fs/xfs/scrub/inode.c new file mode 100644 index 000000000..51820b40a --- /dev/null +++ b/fs/xfs/scrub/inode.c @@ -0,0 +1,628 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2017 Oracle. All Rights Reserved. + * Author: Darrick J. Wong + */ +#include "xfs.h" +#include "xfs_fs.h" +#include "xfs_shared.h" +#include "xfs_format.h" +#include "xfs_trans_resv.h" +#include "xfs_mount.h" +#include "xfs_btree.h" +#include "xfs_log_format.h" +#include "xfs_inode.h" +#include "xfs_ialloc.h" +#include "xfs_da_format.h" +#include "xfs_reflink.h" +#include "xfs_rmap.h" +#include "xfs_bmap_util.h" +#include "scrub/scrub.h" +#include "scrub/common.h" +#include "scrub/btree.h" + +/* + * Grab total control of the inode metadata. It doesn't matter here if + * the file data is still changing; exclusive access to the metadata is + * the goal. + */ +int +xchk_setup_inode( + struct xfs_scrub *sc) +{ + int error; + + /* + * Try to get the inode. If the verifiers fail, we try again + * in raw mode. + */ + error = xchk_get_inode(sc); + switch (error) { + case 0: + break; + case -EFSCORRUPTED: + case -EFSBADCRC: + return xchk_trans_alloc(sc, 0); + default: + return error; + } + + /* Got the inode, lock it and we're ready to go. 
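+ * Note the two-step locking: we take the IOLOCK and MMAPLOCK before + * allocating the scrub transaction and only then take the ILOCK, + * mirroring the lock ordering used by the regular I/O paths.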
*/ + sc->ilock_flags = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL; + xfs_ilock(sc->ip, sc->ilock_flags); + error = xchk_trans_alloc(sc, 0); + if (error) + goto out; + sc->ilock_flags |= XFS_ILOCK_EXCL; + xfs_ilock(sc->ip, XFS_ILOCK_EXCL); + +out: + /* scrub teardown will unlock and release the inode for us */ + return error; +} + +/* Inode core */ + +/* Validate di_extsize hint. */ +STATIC void +xchk_inode_extsize( + struct xfs_scrub *sc, + struct xfs_dinode *dip, + xfs_ino_t ino, + uint16_t mode, + uint16_t flags) +{ + xfs_failaddr_t fa; + uint32_t value = be32_to_cpu(dip->di_extsize); + + fa = xfs_inode_validate_extsize(sc->mp, value, mode, flags); + if (fa) + xchk_ino_set_corrupt(sc, ino); + + /* + * XFS allows a sysadmin to change the rt extent size when adding a rt + * section to a filesystem after formatting. If there are any + * directories with extszinherit and rtinherit set, the hint could + * become misaligned with the new rextsize. The verifier doesn't check + * this, because we allow rtinherit directories even without an rt + * device. Flag this as an administrative warning since we will clean + * this up eventually. + */ + if ((flags & XFS_DIFLAG_RTINHERIT) && + (flags & XFS_DIFLAG_EXTSZINHERIT) && + value % sc->mp->m_sb.sb_rextsize > 0) + xchk_ino_set_warning(sc, ino); +} + +/* + * Validate di_cowextsize hint. + * + * The rules are documented at xfs_ioctl_setattr_check_cowextsize(). + * These functions must be kept in sync with each other. + */ +STATIC void +xchk_inode_cowextsize( + struct xfs_scrub *sc, + struct xfs_dinode *dip, + xfs_ino_t ino, + uint16_t mode, + uint16_t flags, + uint64_t flags2) +{ + xfs_failaddr_t fa; + + fa = xfs_inode_validate_cowextsize(sc->mp, + be32_to_cpu(dip->di_cowextsize), mode, flags, + flags2); + if (fa) + xchk_ino_set_corrupt(sc, ino); +} + +/* Make sure the di_flags make sense for the inode. */ +STATIC void +xchk_inode_flags( + struct xfs_scrub *sc, + struct xfs_dinode *dip, + xfs_ino_t ino, + uint16_t mode, + uint16_t flags) +{ + struct xfs_mount *mp = sc->mp; + + /* di_flags are all taken, last bit cannot be used */ + if (flags & ~XFS_DIFLAG_ANY) + goto bad; + + /* rt flags require rt device */ + if ((flags & XFS_DIFLAG_REALTIME) && !mp->m_rtdev_targp) + goto bad; + + /* new rt bitmap flag only valid for rbmino */ + if ((flags & XFS_DIFLAG_NEWRTBM) && ino != mp->m_sb.sb_rbmino) + goto bad; + + /* directory-only flags */ + if ((flags & (XFS_DIFLAG_RTINHERIT | + XFS_DIFLAG_EXTSZINHERIT | + XFS_DIFLAG_PROJINHERIT | + XFS_DIFLAG_NOSYMLINKS)) && + !S_ISDIR(mode)) + goto bad; + + /* file-only flags */ + if ((flags & (XFS_DIFLAG_REALTIME | FS_XFLAG_EXTSIZE)) && + !S_ISREG(mode)) + goto bad; + + /* filestreams and rt make no sense */ + if ((flags & XFS_DIFLAG_FILESTREAM) && (flags & XFS_DIFLAG_REALTIME)) + goto bad; + + return; +bad: + xchk_ino_set_corrupt(sc, ino); +} + +/* Make sure the di_flags2 make sense for the inode. */ +STATIC void +xchk_inode_flags2( + struct xfs_scrub *sc, + struct xfs_dinode *dip, + xfs_ino_t ino, + uint16_t mode, + uint16_t flags, + uint64_t flags2) +{ + struct xfs_mount *mp = sc->mp; + + /* Unknown di_flags2 could be from a future kernel */ + if (flags2 & ~XFS_DIFLAG2_ANY) + xchk_ino_set_warning(sc, ino); + + /* reflink flag requires reflink feature */ + if ((flags2 & XFS_DIFLAG2_REFLINK) && + !xfs_has_reflink(mp)) + goto bad; + + /* cowextsize flag is checked w.r.t. 
mode separately */ + + /* file/dir-only flags */ + if ((flags2 & XFS_DIFLAG2_DAX) && !(S_ISREG(mode) || S_ISDIR(mode))) + goto bad; + + /* file-only flags */ + if ((flags2 & XFS_DIFLAG2_REFLINK) && !S_ISREG(mode)) + goto bad; + + /* realtime and reflink make no sense, currently */ + if ((flags & XFS_DIFLAG_REALTIME) && (flags2 & XFS_DIFLAG2_REFLINK)) + goto bad; + + /* no bigtime iflag without the bigtime feature */ + if (xfs_dinode_has_bigtime(dip) && !xfs_has_bigtime(mp)) + goto bad; + + return; +bad: + xchk_ino_set_corrupt(sc, ino); +} + +static inline void +xchk_dinode_nsec( + struct xfs_scrub *sc, + xfs_ino_t ino, + struct xfs_dinode *dip, + const xfs_timestamp_t ts) +{ + struct timespec64 tv; + + tv = xfs_inode_from_disk_ts(dip, ts); + if (tv.tv_nsec < 0 || tv.tv_nsec >= NSEC_PER_SEC) + xchk_ino_set_corrupt(sc, ino); +} + +/* Scrub all the ondisk inode fields. */ +STATIC void +xchk_dinode( + struct xfs_scrub *sc, + struct xfs_dinode *dip, + xfs_ino_t ino) +{ + struct xfs_mount *mp = sc->mp; + size_t fork_recs; + unsigned long long isize; + uint64_t flags2; + xfs_extnum_t nextents; + xfs_extnum_t naextents; + prid_t prid; + uint16_t flags; + uint16_t mode; + + flags = be16_to_cpu(dip->di_flags); + if (dip->di_version >= 3) + flags2 = be64_to_cpu(dip->di_flags2); + else + flags2 = 0; + + /* di_mode */ + mode = be16_to_cpu(dip->di_mode); + switch (mode & S_IFMT) { + case S_IFLNK: + case S_IFREG: + case S_IFDIR: + case S_IFCHR: + case S_IFBLK: + case S_IFIFO: + case S_IFSOCK: + /* mode is recognized */ + break; + default: + xchk_ino_set_corrupt(sc, ino); + break; + } + + /* v1/v2 fields */ + switch (dip->di_version) { + case 1: + /* + * We autoconvert v1 inodes into v2 inodes on writeout, + * so just mark this inode for preening. + */ + xchk_ino_set_preen(sc, ino); + prid = 0; + break; + case 2: + case 3: + if (dip->di_onlink != 0) + xchk_ino_set_corrupt(sc, ino); + + if (dip->di_mode == 0 && sc->ip) + xchk_ino_set_corrupt(sc, ino); + + if (dip->di_projid_hi != 0 && + !xfs_has_projid32(mp)) + xchk_ino_set_corrupt(sc, ino); + + prid = be16_to_cpu(dip->di_projid_lo); + break; + default: + xchk_ino_set_corrupt(sc, ino); + return; + } + + if (xfs_has_projid32(mp)) + prid |= (prid_t)be16_to_cpu(dip->di_projid_hi) << 16; + + /* + * di_uid/di_gid -- -1 isn't invalid, but there's no way that + * userspace could have created that. + */ + if (dip->di_uid == cpu_to_be32(-1U) || + dip->di_gid == cpu_to_be32(-1U)) + xchk_ino_set_warning(sc, ino); + + /* + * project id of -1 isn't supposed to be valid, but the kernel didn't + * always validate that. + */ + if (prid == -1U) + xchk_ino_set_warning(sc, ino); + + /* di_format */ + switch (dip->di_format) { + case XFS_DINODE_FMT_DEV: + if (!S_ISCHR(mode) && !S_ISBLK(mode) && + !S_ISFIFO(mode) && !S_ISSOCK(mode)) + xchk_ino_set_corrupt(sc, ino); + break; + case XFS_DINODE_FMT_LOCAL: + if (!S_ISDIR(mode) && !S_ISLNK(mode)) + xchk_ino_set_corrupt(sc, ino); + break; + case XFS_DINODE_FMT_EXTENTS: + if (!S_ISREG(mode) && !S_ISDIR(mode) && !S_ISLNK(mode)) + xchk_ino_set_corrupt(sc, ino); + break; + case XFS_DINODE_FMT_BTREE: + if (!S_ISREG(mode) && !S_ISDIR(mode)) + xchk_ino_set_corrupt(sc, ino); + break; + case XFS_DINODE_FMT_UUID: + default: + xchk_ino_set_corrupt(sc, ino); + break; + } + + /* di_[amc]time.nsec */ + xchk_dinode_nsec(sc, ino, dip, dip->di_atime); + xchk_dinode_nsec(sc, ino, dip, dip->di_mtime); + xchk_dinode_nsec(sc, ino, dip, dip->di_ctime); + + /* + * di_size. 
xfs_dinode_verify checks for things that screw up + * the VFS such as the upper bit being set and zero-length + * symlinks/directories, but we can do more here. + */ + isize = be64_to_cpu(dip->di_size); + if (isize & (1ULL << 63)) + xchk_ino_set_corrupt(sc, ino); + + /* Devices, fifos, and sockets must have zero size */ + if (!S_ISDIR(mode) && !S_ISREG(mode) && !S_ISLNK(mode) && isize != 0) + xchk_ino_set_corrupt(sc, ino); + + /* Directories can't be larger than the data section size (32G) */ + if (S_ISDIR(mode) && (isize == 0 || isize >= XFS_DIR2_SPACE_SIZE)) + xchk_ino_set_corrupt(sc, ino); + + /* Symlinks can't be larger than SYMLINK_MAXLEN */ + if (S_ISLNK(mode) && (isize == 0 || isize >= XFS_SYMLINK_MAXLEN)) + xchk_ino_set_corrupt(sc, ino); + + /* + * Warn if the running kernel can't handle the kinds of offsets + * needed to deal with the file size. In other words, if the + * pagecache can't cache all the blocks in this file due to + * overly large offsets, flag the inode for admin review. + */ + if (isize >= mp->m_super->s_maxbytes) + xchk_ino_set_warning(sc, ino); + + /* di_nblocks */ + if (flags2 & XFS_DIFLAG2_REFLINK) { + ; /* nblocks can exceed dblocks */ + } else if (flags & XFS_DIFLAG_REALTIME) { + /* + * nblocks is the sum of data extents (in the rtdev), + * attr extents (in the datadev), and both forks' bmbt + * blocks (in the datadev). This clumsy check is the + * best we can do without cross-referencing with the + * inode forks. + */ + if (be64_to_cpu(dip->di_nblocks) >= + mp->m_sb.sb_dblocks + mp->m_sb.sb_rblocks) + xchk_ino_set_corrupt(sc, ino); + } else { + if (be64_to_cpu(dip->di_nblocks) >= mp->m_sb.sb_dblocks) + xchk_ino_set_corrupt(sc, ino); + } + + xchk_inode_flags(sc, dip, ino, mode, flags); + + xchk_inode_extsize(sc, dip, ino, mode, flags); + + nextents = xfs_dfork_data_extents(dip); + naextents = xfs_dfork_attr_extents(dip); + + /* di_nextents */ + fork_recs = XFS_DFORK_DSIZE(dip, mp) / sizeof(struct xfs_bmbt_rec); + switch (dip->di_format) { + case XFS_DINODE_FMT_EXTENTS: + if (nextents > fork_recs) + xchk_ino_set_corrupt(sc, ino); + break; + case XFS_DINODE_FMT_BTREE: + if (nextents <= fork_recs) + xchk_ino_set_corrupt(sc, ino); + break; + default: + if (nextents != 0) + xchk_ino_set_corrupt(sc, ino); + break; + } + + /* di_forkoff */ + if (XFS_DFORK_APTR(dip) >= (char *)dip + mp->m_sb.sb_inodesize) + xchk_ino_set_corrupt(sc, ino); + if (naextents != 0 && dip->di_forkoff == 0) + xchk_ino_set_corrupt(sc, ino); + if (dip->di_forkoff == 0 && dip->di_aformat != XFS_DINODE_FMT_EXTENTS) + xchk_ino_set_corrupt(sc, ino); + + /* di_aformat */ + if (dip->di_aformat != XFS_DINODE_FMT_LOCAL && + dip->di_aformat != XFS_DINODE_FMT_EXTENTS && + dip->di_aformat != XFS_DINODE_FMT_BTREE) + xchk_ino_set_corrupt(sc, ino); + + /* di_anextents */ + fork_recs = XFS_DFORK_ASIZE(dip, mp) / sizeof(struct xfs_bmbt_rec); + switch (dip->di_aformat) { + case XFS_DINODE_FMT_EXTENTS: + if (naextents > fork_recs) + xchk_ino_set_corrupt(sc, ino); + break; + case XFS_DINODE_FMT_BTREE: + if (naextents <= fork_recs) + xchk_ino_set_corrupt(sc, ino); + break; + default: + if (naextents != 0) + xchk_ino_set_corrupt(sc, ino); + } + + if (dip->di_version >= 3) { + xchk_dinode_nsec(sc, ino, dip, dip->di_crtime); + xchk_inode_flags2(sc, dip, ino, mode, flags, flags2); + xchk_inode_cowextsize(sc, dip, ino, mode, flags, + flags2); + } +} + +/* + * Make sure the finobt doesn't think this inode is free. 
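+ * (The finobt only tracks inode chunks that still contain free inodes,
+ * so for an allocated inode the record may legitimately be absent; but
+ * if a record does cover this inode, its free bit had better be clear.)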
+ * We don't have to check the inobt ourselves because we got the inode via + * IGET_UNTRUSTED, which checks the inobt for us. + */ +static void +xchk_inode_xref_finobt( + struct xfs_scrub *sc, + xfs_ino_t ino) +{ + struct xfs_inobt_rec_incore rec; + xfs_agino_t agino; + int has_record; + int error; + + if (!sc->sa.fino_cur || xchk_skip_xref(sc->sm)) + return; + + agino = XFS_INO_TO_AGINO(sc->mp, ino); + + /* + * Try to get the finobt record. If we can't get it, then we're + * in good shape. + */ + error = xfs_inobt_lookup(sc->sa.fino_cur, agino, XFS_LOOKUP_LE, + &has_record); + if (!xchk_should_check_xref(sc, &error, &sc->sa.fino_cur) || + !has_record) + return; + + error = xfs_inobt_get_rec(sc->sa.fino_cur, &rec, &has_record); + if (!xchk_should_check_xref(sc, &error, &sc->sa.fino_cur) || + !has_record) + return; + + /* + * Otherwise, make sure this record either doesn't cover this inode, + * or that it does but it's marked present. + */ + if (rec.ir_startino > agino || + rec.ir_startino + XFS_INODES_PER_CHUNK <= agino) + return; + + if (rec.ir_free & XFS_INOBT_MASK(agino - rec.ir_startino)) + xchk_btree_xref_set_corrupt(sc, sc->sa.fino_cur, 0); +} + +/* Cross reference the inode fields with the forks. */ +STATIC void +xchk_inode_xref_bmap( + struct xfs_scrub *sc, + struct xfs_dinode *dip) +{ + xfs_extnum_t nextents; + xfs_filblks_t count; + xfs_filblks_t acount; + int error; + + if (xchk_skip_xref(sc->sm)) + return; + + /* Walk all the extents to check nextents/naextents/nblocks. */ + error = xfs_bmap_count_blocks(sc->tp, sc->ip, XFS_DATA_FORK, + &nextents, &count); + if (!xchk_should_check_xref(sc, &error, NULL)) + return; + if (nextents < xfs_dfork_data_extents(dip)) + xchk_ino_xref_set_corrupt(sc, sc->ip->i_ino); + + error = xfs_bmap_count_blocks(sc->tp, sc->ip, XFS_ATTR_FORK, + &nextents, &acount); + if (!xchk_should_check_xref(sc, &error, NULL)) + return; + if (nextents != xfs_dfork_attr_extents(dip)) + xchk_ino_xref_set_corrupt(sc, sc->ip->i_ino); + + /* Check nblocks against the inode. */ + if (count + acount != be64_to_cpu(dip->di_nblocks)) + xchk_ino_xref_set_corrupt(sc, sc->ip->i_ino); +} + +/* Cross-reference with the other btrees. */ +STATIC void +xchk_inode_xref( + struct xfs_scrub *sc, + xfs_ino_t ino, + struct xfs_dinode *dip) +{ + xfs_agnumber_t agno; + xfs_agblock_t agbno; + int error; + + if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) + return; + + agno = XFS_INO_TO_AGNO(sc->mp, ino); + agbno = XFS_INO_TO_AGBNO(sc->mp, ino); + + error = xchk_ag_init_existing(sc, agno, &sc->sa); + if (!xchk_xref_process_error(sc, agno, agbno, &error)) + goto out_free; + + xchk_xref_is_used_space(sc, agbno, 1); + xchk_inode_xref_finobt(sc, ino); + xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_INODES); + xchk_xref_is_not_shared(sc, agbno, 1); + xchk_inode_xref_bmap(sc, dip); + +out_free: + xchk_ag_free(sc, &sc->sa); +} + +/* + * If the reflink iflag disagrees with a scan for shared data fork extents, + * either flag an error (shared extents w/ no flag) or a preen (flag set w/o + * any shared extents). We already checked for reflink iflag set on a non + * reflink filesystem. 
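+ *
+ * To summarize the four cases:
+ *
+ *	iflag set,   shared extents found:	ok
+ *	iflag set,   no shared extents:		preen (flag can be cleared)
+ *	iflag clear, shared extents found:	corruption
+ *	iflag clear, no shared extents:		ok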
+ */ +static void +xchk_inode_check_reflink_iflag( + struct xfs_scrub *sc, + xfs_ino_t ino) +{ + struct xfs_mount *mp = sc->mp; + bool has_shared; + int error; + + if (!xfs_has_reflink(mp)) + return; + + error = xfs_reflink_inode_has_shared_extents(sc->tp, sc->ip, + &has_shared); + if (!xchk_xref_process_error(sc, XFS_INO_TO_AGNO(mp, ino), + XFS_INO_TO_AGBNO(mp, ino), &error)) + return; + if (xfs_is_reflink_inode(sc->ip) && !has_shared) + xchk_ino_set_preen(sc, ino); + else if (!xfs_is_reflink_inode(sc->ip) && has_shared) + xchk_ino_set_corrupt(sc, ino); +} + +/* Scrub an inode. */ +int +xchk_inode( + struct xfs_scrub *sc) +{ + struct xfs_dinode di; + int error = 0; + + /* + * If sc->ip is NULL, that means that the setup function called + * xfs_iget to look up the inode. xfs_iget returned a EFSCORRUPTED + * and a NULL inode, so flag the corruption error and return. + */ + if (!sc->ip) { + xchk_ino_set_corrupt(sc, sc->sm->sm_ino); + return 0; + } + + /* Scrub the inode core. */ + xfs_inode_to_disk(sc->ip, &di, 0); + xchk_dinode(sc, &di, sc->ip->i_ino); + if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) + goto out; + + /* + * Look for discrepancies between file's data blocks and the reflink + * iflag. We already checked the iflag against the file mode when + * we scrubbed the dinode. + */ + if (S_ISREG(VFS_I(sc->ip)->i_mode)) + xchk_inode_check_reflink_iflag(sc, sc->ip->i_ino); + + xchk_inode_xref(sc, sc->ip->i_ino, &di); +out: + return error; +} diff --git a/fs/xfs/scrub/parent.c b/fs/xfs/scrub/parent.c new file mode 100644 index 000000000..d8dff3fd8 --- /dev/null +++ b/fs/xfs/scrub/parent.c @@ -0,0 +1,334 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2017 Oracle. All Rights Reserved. + * Author: Darrick J. Wong + */ +#include "xfs.h" +#include "xfs_fs.h" +#include "xfs_shared.h" +#include "xfs_format.h" +#include "xfs_trans_resv.h" +#include "xfs_mount.h" +#include "xfs_log_format.h" +#include "xfs_inode.h" +#include "xfs_icache.h" +#include "xfs_dir2.h" +#include "xfs_dir2_priv.h" +#include "scrub/scrub.h" +#include "scrub/common.h" + +/* Set us up to scrub parents. */ +int +xchk_setup_parent( + struct xfs_scrub *sc) +{ + return xchk_setup_inode_contents(sc, 0); +} + +/* Parent pointers */ + +/* Look for an entry in a parent pointing to this inode. */ + +struct xchk_parent_ctx { + struct dir_context dc; + struct xfs_scrub *sc; + xfs_ino_t ino; + xfs_nlink_t nlink; + bool cancelled; +}; + +/* Look for a single entry in a directory pointing to an inode. */ +STATIC bool +xchk_parent_actor( + struct dir_context *dc, + const char *name, + int namelen, + loff_t pos, + u64 ino, + unsigned type) +{ + struct xchk_parent_ctx *spc; + int error = 0; + + spc = container_of(dc, struct xchk_parent_ctx, dc); + if (spc->ino == ino) + spc->nlink++; + + /* + * If we're facing a fatal signal, bail out. Store the cancellation + * status separately because the VFS readdir code squashes error codes + * into short directory reads. + */ + if (xchk_should_terminate(spc->sc, &error)) + spc->cancelled = true; + + return !error; +} + +/* Count the number of dentries in the parent dir that point to this inode. 
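+ *
+ * We do this by walking the parent with xfs_readdir() and a private
+ * dir_context actor that bumps a counter every time it sees our inode
+ * number, looping until the directory position stops advancing.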
*/ +STATIC int +xchk_parent_count_parent_dentries( + struct xfs_scrub *sc, + struct xfs_inode *parent, + xfs_nlink_t *nlink) +{ + struct xchk_parent_ctx spc = { + .dc.actor = xchk_parent_actor, + .ino = sc->ip->i_ino, + .sc = sc, + }; + size_t bufsize; + loff_t oldpos; + uint lock_mode; + int error = 0; + + /* + * If there are any blocks, read-ahead block 0 as we're almost + * certain to have the next operation be a read there. This is + * how we guarantee that the parent's extent map has been loaded, + * if there is one. + */ + lock_mode = xfs_ilock_data_map_shared(parent); + if (parent->i_df.if_nextents > 0) + error = xfs_dir3_data_readahead(parent, 0, 0); + xfs_iunlock(parent, lock_mode); + if (error) + return error; + + /* + * Iterate the parent dir to confirm that there is + * exactly one entry pointing back to the inode being + * scanned. + */ + bufsize = (size_t)min_t(loff_t, XFS_READDIR_BUFSIZE, + parent->i_disk_size); + oldpos = 0; + while (true) { + error = xfs_readdir(sc->tp, parent, &spc.dc, bufsize); + if (error) + goto out; + if (spc.cancelled) { + error = -EAGAIN; + goto out; + } + if (oldpos == spc.dc.pos) + break; + oldpos = spc.dc.pos; + } + *nlink = spc.nlink; +out: + return error; +} + +/* + * Given the inode number of the alleged parent of the inode being + * scrubbed, try to validate that the parent has exactly one directory + * entry pointing back to the inode being scrubbed. + */ +STATIC int +xchk_parent_validate( + struct xfs_scrub *sc, + xfs_ino_t dnum, + bool *try_again) +{ + struct xfs_mount *mp = sc->mp; + struct xfs_inode *dp = NULL; + xfs_nlink_t expected_nlink; + xfs_nlink_t nlink; + int error = 0; + + *try_again = false; + + if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) + goto out; + + /* '..' must not point to ourselves. */ + if (sc->ip->i_ino == dnum) { + xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0); + goto out; + } + + /* + * If we're an unlinked directory, the parent /won't/ have a link + * to us. Otherwise, it should have one link. + */ + expected_nlink = VFS_I(sc->ip)->i_nlink == 0 ? 0 : 1; + + /* + * Grab this parent inode. We release the inode before we + * cancel the scrub transaction. Since we're don't know a + * priori that releasing the inode won't trigger eofblocks + * cleanup (which allocates what would be a nested transaction) + * if the parent pointer erroneously points to a file, we + * can't use DONTCACHE here because DONTCACHE inodes can trigger + * immediate inactive cleanup of the inode. + * + * If _iget returns -EINVAL or -ENOENT then the parent inode number is + * garbage and the directory is corrupt. If the _iget returns + * -EFSCORRUPTED or -EFSBADCRC then the parent is corrupt which is a + * cross referencing error. Any other error is an operational error. + */ + error = xfs_iget(mp, sc->tp, dnum, XFS_IGET_UNTRUSTED, 0, &dp); + if (error == -EINVAL || error == -ENOENT) { + error = -EFSCORRUPTED; + xchk_fblock_process_error(sc, XFS_DATA_FORK, 0, &error); + goto out; + } + if (!xchk_fblock_xref_process_error(sc, XFS_DATA_FORK, 0, &error)) + goto out; + if (dp == sc->ip || !S_ISDIR(VFS_I(dp)->i_mode)) { + xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0); + goto out_rele; + } + + /* + * We prefer to keep the inode locked while we lock and search + * its alleged parent for a forward reference. If we can grab + * the iolock, validate the pointers and we're done. We must + * use nowait here to avoid an ABBA deadlock on the parent and + * the child inodes. 
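+ *
+ * (The ABBA scenario: we hold the child's locks and want the parent's
+ * IOLOCK, while a lookup walking down the directory tree holds the
+ * parent's locks and wants the child's; hence the trylock here and the
+ * drop-and-relock fallback below when it fails.)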
+ */ + if (xfs_ilock_nowait(dp, XFS_IOLOCK_SHARED)) { + error = xchk_parent_count_parent_dentries(sc, dp, &nlink); + if (!xchk_fblock_xref_process_error(sc, XFS_DATA_FORK, 0, + &error)) + goto out_unlock; + if (nlink != expected_nlink) + xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0); + goto out_unlock; + } + + /* + * The game changes if we get here. We failed to lock the parent, + * so we're going to try to verify both pointers while only holding + * one lock so as to avoid deadlocking with something that's actually + * trying to traverse down the directory tree. + */ + xfs_iunlock(sc->ip, sc->ilock_flags); + sc->ilock_flags = 0; + error = xchk_ilock_inverted(dp, XFS_IOLOCK_SHARED); + if (error) + goto out_rele; + + /* Go looking for our dentry. */ + error = xchk_parent_count_parent_dentries(sc, dp, &nlink); + if (!xchk_fblock_xref_process_error(sc, XFS_DATA_FORK, 0, &error)) + goto out_unlock; + + /* Drop the parent lock, relock this inode. */ + xfs_iunlock(dp, XFS_IOLOCK_SHARED); + error = xchk_ilock_inverted(sc->ip, XFS_IOLOCK_EXCL); + if (error) + goto out_rele; + sc->ilock_flags = XFS_IOLOCK_EXCL; + + /* + * If we're an unlinked directory, the parent /won't/ have a link + * to us. Otherwise, it should have one link. We have to re-set + * it here because we dropped the lock on sc->ip. + */ + expected_nlink = VFS_I(sc->ip)->i_nlink == 0 ? 0 : 1; + + /* Look up '..' to see if the inode changed. */ + error = xfs_dir_lookup(sc->tp, sc->ip, &xfs_name_dotdot, &dnum, NULL); + if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, 0, &error)) + goto out_rele; + + /* Drat, parent changed. Try again! */ + if (dnum != dp->i_ino) { + xfs_irele(dp); + *try_again = true; + return 0; + } + xfs_irele(dp); + + /* + * '..' didn't change, so check that there was only one entry + * for us in the parent. + */ + if (nlink != expected_nlink) + xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0); + return error; + +out_unlock: + xfs_iunlock(dp, XFS_IOLOCK_SHARED); +out_rele: + xfs_irele(dp); +out: + return error; +} + +/* Scrub a parent pointer. */ +int +xchk_parent( + struct xfs_scrub *sc) +{ + struct xfs_mount *mp = sc->mp; + xfs_ino_t dnum; + bool try_again; + int tries = 0; + int error = 0; + + /* + * If we're a directory, check that the '..' link points up to + * a directory that has one entry pointing to us. + */ + if (!S_ISDIR(VFS_I(sc->ip)->i_mode)) + return -ENOENT; + + /* We're not a special inode, are we? */ + if (!xfs_verify_dir_ino(mp, sc->ip->i_ino)) { + xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0); + goto out; + } + + /* + * The VFS grabs a read or write lock via i_rwsem before it reads + * or writes to a directory. If we've gotten this far we've + * already obtained IOLOCK_EXCL, which (since 4.10) is the same as + * getting a write lock on i_rwsem. Therefore, it is safe for us + * to drop the ILOCK here in order to do directory lookups. + */ + sc->ilock_flags &= ~(XFS_ILOCK_EXCL | XFS_MMAPLOCK_EXCL); + xfs_iunlock(sc->ip, XFS_ILOCK_EXCL | XFS_MMAPLOCK_EXCL); + + /* Look up '..' */ + error = xfs_dir_lookup(sc->tp, sc->ip, &xfs_name_dotdot, &dnum, NULL); + if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, 0, &error)) + goto out; + if (!xfs_verify_dir_ino(mp, dnum)) { + xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0); + goto out; + } + + /* Is this the root dir? Then '..' must point to itself. 
*/ + if (sc->ip == mp->m_rootip) { + if (sc->ip->i_ino != mp->m_sb.sb_rootino || + sc->ip->i_ino != dnum) + xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0); + goto out; + } + + do { + error = xchk_parent_validate(sc, dnum, &try_again); + if (error) + goto out; + } while (try_again && ++tries < 20); + + /* + * We gave it our best shot but failed, so mark this scrub + * incomplete. Userspace can decide if it wants to try again. + */ + if (try_again && tries == 20) + xchk_set_incomplete(sc); +out: + /* + * If we failed to lock the parent inode even after a retry, just mark + * this scrub incomplete and return. + */ + if ((sc->flags & XCHK_TRY_HARDER) && error == -EDEADLOCK) { + error = 0; + xchk_set_incomplete(sc); + } + return error; +} diff --git a/fs/xfs/scrub/quota.c b/fs/xfs/scrub/quota.c new file mode 100644 index 000000000..21b4c9006 --- /dev/null +++ b/fs/xfs/scrub/quota.c @@ -0,0 +1,248 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2017 Oracle. All Rights Reserved. + * Author: Darrick J. Wong + */ +#include "xfs.h" +#include "xfs_fs.h" +#include "xfs_shared.h" +#include "xfs_format.h" +#include "xfs_trans_resv.h" +#include "xfs_mount.h" +#include "xfs_log_format.h" +#include "xfs_trans.h" +#include "xfs_inode.h" +#include "xfs_quota.h" +#include "xfs_qm.h" +#include "scrub/scrub.h" +#include "scrub/common.h" + +/* Convert a scrub type code to a DQ flag, or return 0 if error. */ +static inline xfs_dqtype_t +xchk_quota_to_dqtype( + struct xfs_scrub *sc) +{ + switch (sc->sm->sm_type) { + case XFS_SCRUB_TYPE_UQUOTA: + return XFS_DQTYPE_USER; + case XFS_SCRUB_TYPE_GQUOTA: + return XFS_DQTYPE_GROUP; + case XFS_SCRUB_TYPE_PQUOTA: + return XFS_DQTYPE_PROJ; + default: + return 0; + } +} + +/* Set us up to scrub a quota. */ +int +xchk_setup_quota( + struct xfs_scrub *sc) +{ + xfs_dqtype_t dqtype; + int error; + + if (!XFS_IS_QUOTA_ON(sc->mp)) + return -ENOENT; + + dqtype = xchk_quota_to_dqtype(sc); + if (dqtype == 0) + return -EINVAL; + + if (!xfs_this_quota_on(sc->mp, dqtype)) + return -ENOENT; + + error = xchk_setup_fs(sc); + if (error) + return error; + sc->ip = xfs_quota_inode(sc->mp, dqtype); + xfs_ilock(sc->ip, XFS_ILOCK_EXCL); + sc->ilock_flags = XFS_ILOCK_EXCL; + return 0; +} + +/* Quotas. */ + +struct xchk_quota_info { + struct xfs_scrub *sc; + xfs_dqid_t last_id; +}; + +/* Scrub the fields in an individual quota item. */ +STATIC int +xchk_quota_item( + struct xfs_dquot *dq, + xfs_dqtype_t dqtype, + void *priv) +{ + struct xchk_quota_info *sqi = priv; + struct xfs_scrub *sc = sqi->sc; + struct xfs_mount *mp = sc->mp; + struct xfs_quotainfo *qi = mp->m_quotainfo; + xfs_fileoff_t offset; + xfs_ino_t fs_icount; + int error = 0; + + if (xchk_should_terminate(sc, &error)) + return -ECANCELED; + + /* + * Except for the root dquot, the actual dquot we got must either have + * the same or higher id as we saw before. + */ + offset = dq->q_id / qi->qi_dqperchunk; + if (dq->q_id && dq->q_id <= sqi->last_id) + xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset); + + sqi->last_id = dq->q_id; + + /* + * Warn if the hard limits are larger than the fs. + * Administrators can do this, though in production this seems + * suspect, which is why we flag it for review. + * + * Complain about corruption if the soft limit is greater than + * the hard limit. 
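+ *
+ * For each of the block, inode, and rt block limit pairs, that is:
+ *
+ *	hard limit > size of the resource:	warning
+ *	soft limit > hard limit:		corruption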
+ */ + if (dq->q_blk.hardlimit > mp->m_sb.sb_dblocks) + xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset); + if (dq->q_blk.softlimit > dq->q_blk.hardlimit) + xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset); + + if (dq->q_ino.hardlimit > M_IGEO(mp)->maxicount) + xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset); + if (dq->q_ino.softlimit > dq->q_ino.hardlimit) + xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset); + + if (dq->q_rtb.hardlimit > mp->m_sb.sb_rblocks) + xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset); + if (dq->q_rtb.softlimit > dq->q_rtb.hardlimit) + xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset); + + /* Check the resource counts. */ + fs_icount = percpu_counter_sum(&mp->m_icount); + + /* + * Check that usage doesn't exceed physical limits. However, on + * a reflink filesystem we're allowed to exceed physical space + * if there are no quota limits. + */ + if (xfs_has_reflink(mp)) { + if (mp->m_sb.sb_dblocks < dq->q_blk.count) + xchk_fblock_set_warning(sc, XFS_DATA_FORK, + offset); + } else { + if (mp->m_sb.sb_dblocks < dq->q_blk.count) + xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, + offset); + } + if (dq->q_ino.count > fs_icount || dq->q_rtb.count > mp->m_sb.sb_rblocks) + xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset); + + /* + * We can violate the hard limits if the admin suddenly sets a + * lower limit than the actual usage. However, we flag it for + * admin review. + */ + if (dq->q_id == 0) + goto out; + + if (dq->q_blk.hardlimit != 0 && + dq->q_blk.count > dq->q_blk.hardlimit) + xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset); + + if (dq->q_ino.hardlimit != 0 && + dq->q_ino.count > dq->q_ino.hardlimit) + xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset); + + if (dq->q_rtb.hardlimit != 0 && + dq->q_rtb.count > dq->q_rtb.hardlimit) + xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset); + +out: + if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) + return -ECANCELED; + + return 0; +} + +/* Check the quota's data fork. */ +STATIC int +xchk_quota_data_fork( + struct xfs_scrub *sc) +{ + struct xfs_bmbt_irec irec = { 0 }; + struct xfs_iext_cursor icur; + struct xfs_quotainfo *qi = sc->mp->m_quotainfo; + struct xfs_ifork *ifp; + xfs_fileoff_t max_dqid_off; + int error = 0; + + /* Invoke the fork scrubber. */ + error = xchk_metadata_inode_forks(sc); + if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)) + return error; + + /* Check for data fork problems that apply only to quota files. */ + max_dqid_off = ((xfs_dqid_t)-1) / qi->qi_dqperchunk; + ifp = xfs_ifork_ptr(sc->ip, XFS_DATA_FORK); + for_each_xfs_iext(ifp, &icur, &irec) { + if (xchk_should_terminate(sc, &error)) + break; + /* + * delalloc extents or blocks mapped above the highest + * quota id shouldn't happen. + */ + if (isnullstartblock(irec.br_startblock) || + irec.br_startoff > max_dqid_off || + irec.br_startoff + irec.br_blockcount - 1 > max_dqid_off) { + xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, + irec.br_startoff); + break; + } + } + + return error; +} + +/* Scrub all of a quota type's items. */ +int +xchk_quota( + struct xfs_scrub *sc) +{ + struct xchk_quota_info sqi; + struct xfs_mount *mp = sc->mp; + struct xfs_quotainfo *qi = mp->m_quotainfo; + xfs_dqtype_t dqtype; + int error = 0; + + dqtype = xchk_quota_to_dqtype(sc); + + /* Look for problem extents. */ + error = xchk_quota_data_fork(sc); + if (error) + goto out; + if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) + goto out; + + /* + * Check all the quota items. 
Now that we've checked the quota inode + * data fork we have to drop ILOCK_EXCL to use the regular dquot + * functions. + */ + xfs_iunlock(sc->ip, sc->ilock_flags); + sc->ilock_flags = 0; + sqi.sc = sc; + sqi.last_id = 0; + error = xfs_qm_dqiterate(mp, dqtype, xchk_quota_item, &sqi); + sc->ilock_flags = XFS_ILOCK_EXCL; + xfs_ilock(sc->ip, sc->ilock_flags); + if (error == -ECANCELED) + error = 0; + if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, + sqi.last_id * qi->qi_dqperchunk, &error)) + goto out; + +out: + return error; +} diff --git a/fs/xfs/scrub/refcount.c b/fs/xfs/scrub/refcount.c new file mode 100644 index 000000000..a26ee0f24 --- /dev/null +++ b/fs/xfs/scrub/refcount.c @@ -0,0 +1,473 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2017 Oracle. All Rights Reserved. + * Author: Darrick J. Wong + */ +#include "xfs.h" +#include "xfs_fs.h" +#include "xfs_shared.h" +#include "xfs_format.h" +#include "xfs_btree.h" +#include "xfs_rmap.h" +#include "xfs_refcount.h" +#include "scrub/scrub.h" +#include "scrub/common.h" +#include "scrub/btree.h" +#include "xfs_trans_resv.h" +#include "xfs_mount.h" +#include "xfs_ag.h" + +/* + * Set us up to scrub reference count btrees. + */ +int +xchk_setup_ag_refcountbt( + struct xfs_scrub *sc) +{ + return xchk_setup_ag_btree(sc, false); +} + +/* Reference count btree scrubber. */ + +/* + * Confirming Reference Counts via Reverse Mappings + * + * We want to count the reverse mappings overlapping a refcount record + * (bno, len, refcount), allowing for the possibility that some of the + * overlap may come from smaller adjoining reverse mappings, while some + * comes from single extents which overlap the range entirely. The + * outer loop is as follows: + * + * 1. For all reverse mappings overlapping the refcount extent, + * a. If a given rmap completely overlaps, mark it as seen. + * b. Otherwise, record the fragment (in agbno order) for later + * processing. + * + * Once we've seen all the rmaps, we know that for all blocks in the + * refcount record we want to find $refcount owners and we've already + * visited $seen extents that overlap all the blocks. Therefore, we + * need to find ($refcount - $seen) owners for every block in the + * extent; call that quantity $target_nr. Proceed as follows: + * + * 2. Pull the first $target_nr fragments from the list; all of them + * should start at or before the start of the extent. + * Call this subset of fragments the working set. + * 3. Until there are no more unprocessed fragments, + * a. Find the shortest fragments in the set and remove them. + * b. Note the block number of the end of these fragments. + * c. Pull the same number of fragments from the list. All of these + * fragments should start at the block number recorded in the + * previous step. + * d. Put those fragments in the set. + * 4. Check that there are $target_nr fragments remaining in the list, + * and that they all end at or beyond the end of the refcount extent. + * + * If the refcount is correct, all the check conditions in the algorithm + * should always hold true. If not, the refcount is incorrect. 
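+ *
+ * A small worked example, with made-up numbers: for a refcount record
+ * (bno=100, len=4, refcount=3), suppose the rmapbt holds one mapping
+ * [100,104) plus fragments [98,102), [100,102), [102,105) and [102,104):
+ *
+ * 1. [100,104) covers the whole extent, so seen = 1; the other four
+ *    mappings are recorded as fragments.
+ * 2. target_nr = 3 - 1 = 2, so we pull [98,102) and [100,102); both
+ *    start at or before block 100, as required.
+ * 3. The shortest fragments in the working set end at block 102, so the
+ *    next two fragments pulled, [102,105) and [102,104), must start
+ *    there, and indeed they do.
+ * 4. Two fragments remain in the working set and both end at or beyond
+ *    block 104, so a refcount of 3 checks out.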
+ */ +struct xchk_refcnt_frag { + struct list_head list; + struct xfs_rmap_irec rm; +}; + +struct xchk_refcnt_check { + struct xfs_scrub *sc; + struct list_head fragments; + + /* refcount extent we're examining */ + xfs_agblock_t bno; + xfs_extlen_t len; + xfs_nlink_t refcount; + + /* number of owners seen */ + xfs_nlink_t seen; +}; + +/* + * Decide if the given rmap is large enough that we can redeem it + * towards refcount verification now, or if it's a fragment, in + * which case we'll hang onto it in the hopes that we'll later + * discover that we've collected exactly the correct number of + * fragments as the refcountbt says we should have. + */ +STATIC int +xchk_refcountbt_rmap_check( + struct xfs_btree_cur *cur, + const struct xfs_rmap_irec *rec, + void *priv) +{ + struct xchk_refcnt_check *refchk = priv; + struct xchk_refcnt_frag *frag; + xfs_agblock_t rm_last; + xfs_agblock_t rc_last; + int error = 0; + + if (xchk_should_terminate(refchk->sc, &error)) + return error; + + rm_last = rec->rm_startblock + rec->rm_blockcount - 1; + rc_last = refchk->bno + refchk->len - 1; + + /* Confirm that a single-owner refc extent is a CoW stage. */ + if (refchk->refcount == 1 && rec->rm_owner != XFS_RMAP_OWN_COW) { + xchk_btree_xref_set_corrupt(refchk->sc, cur, 0); + return 0; + } + + if (rec->rm_startblock <= refchk->bno && rm_last >= rc_last) { + /* + * The rmap overlaps the refcount record, so we can confirm + * one refcount owner seen. + */ + refchk->seen++; + } else { + /* + * This rmap covers only part of the refcount record, so + * save the fragment for later processing. If the rmapbt + * is healthy each rmap_irec we see will be in agbno order + * so we don't need insertion sort here. + */ + frag = kmem_alloc(sizeof(struct xchk_refcnt_frag), + KM_MAYFAIL); + if (!frag) + return -ENOMEM; + memcpy(&frag->rm, rec, sizeof(frag->rm)); + list_add_tail(&frag->list, &refchk->fragments); + } + + return 0; +} + +/* + * Given a bunch of rmap fragments, iterate through them, keeping + * a running tally of the refcount. If this ever deviates from + * what we expect (which is the refcountbt's refcount minus the + * number of extents that totally covered the refcountbt extent), + * we have a refcountbt error. + */ +STATIC void +xchk_refcountbt_process_rmap_fragments( + struct xchk_refcnt_check *refchk) +{ + struct list_head worklist; + struct xchk_refcnt_frag *frag; + struct xchk_refcnt_frag *n; + xfs_agblock_t bno; + xfs_agblock_t rbno; + xfs_agblock_t next_rbno; + xfs_nlink_t nr; + xfs_nlink_t target_nr; + + target_nr = refchk->refcount - refchk->seen; + if (target_nr == 0) + return; + + /* + * There are (refchk->rc.rc_refcount - refchk->nr refcount) + * references we haven't found yet. Pull that many off the + * fragment list and figure out where the smallest rmap ends + * (and therefore the next rmap should start). All the rmaps + * we pull off should start at or before the beginning of the + * refcount record's range. + */ + INIT_LIST_HEAD(&worklist); + rbno = NULLAGBLOCK; + + /* Make sure the fragments actually /are/ in agbno order. */ + bno = 0; + list_for_each_entry(frag, &refchk->fragments, list) { + if (frag->rm.rm_startblock < bno) + goto done; + bno = frag->rm.rm_startblock; + } + + /* + * Find all the rmaps that start at or before the refc extent, + * and put them on the worklist. 
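+ * While doing so, track in rbno the lowest block at which any of those
+ * fragments ends, since that is where the next batch of fragments must
+ * begin.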
+ */ + nr = 0; + list_for_each_entry_safe(frag, n, &refchk->fragments, list) { + if (frag->rm.rm_startblock > refchk->bno || nr > target_nr) + break; + bno = frag->rm.rm_startblock + frag->rm.rm_blockcount; + if (bno < rbno) + rbno = bno; + list_move_tail(&frag->list, &worklist); + nr++; + } + + /* + * We should have found exactly $target_nr rmap fragments starting + * at or before the refcount extent. + */ + if (nr != target_nr) + goto done; + + while (!list_empty(&refchk->fragments)) { + /* Discard any fragments ending at rbno from the worklist. */ + nr = 0; + next_rbno = NULLAGBLOCK; + list_for_each_entry_safe(frag, n, &worklist, list) { + bno = frag->rm.rm_startblock + frag->rm.rm_blockcount; + if (bno != rbno) { + if (bno < next_rbno) + next_rbno = bno; + continue; + } + list_del(&frag->list); + kmem_free(frag); + nr++; + } + + /* Try to add nr rmaps starting at rbno to the worklist. */ + list_for_each_entry_safe(frag, n, &refchk->fragments, list) { + bno = frag->rm.rm_startblock + frag->rm.rm_blockcount; + if (frag->rm.rm_startblock != rbno) + goto done; + list_move_tail(&frag->list, &worklist); + if (next_rbno > bno) + next_rbno = bno; + nr--; + if (nr == 0) + break; + } + + /* + * If we get here and nr > 0, this means that we added fewer + * items to the worklist than we discarded because the fragment + * list ran out of items. Therefore, we cannot maintain the + * required refcount. Something is wrong, so we're done. + */ + if (nr) + goto done; + + rbno = next_rbno; + } + + /* + * Make sure the last extent we processed ends at or beyond + * the end of the refcount extent. + */ + if (rbno < refchk->bno + refchk->len) + goto done; + + /* Actually record us having seen the remaining refcount. */ + refchk->seen = refchk->refcount; +done: + /* Delete fragments and work list. */ + list_for_each_entry_safe(frag, n, &worklist, list) { + list_del(&frag->list); + kmem_free(frag); + } + list_for_each_entry_safe(frag, n, &refchk->fragments, list) { + list_del(&frag->list); + kmem_free(frag); + } +} + +/* Use the rmap entries covering this extent to verify the refcount. */ +STATIC void +xchk_refcountbt_xref_rmap( + struct xfs_scrub *sc, + const struct xfs_refcount_irec *irec) +{ + struct xchk_refcnt_check refchk = { + .sc = sc, + .bno = irec->rc_startblock, + .len = irec->rc_blockcount, + .refcount = irec->rc_refcount, + .seen = 0, + }; + struct xfs_rmap_irec low; + struct xfs_rmap_irec high; + struct xchk_refcnt_frag *frag; + struct xchk_refcnt_frag *n; + int error; + + if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm)) + return; + + /* Cross-reference with the rmapbt to confirm the refcount. */ + memset(&low, 0, sizeof(low)); + low.rm_startblock = irec->rc_startblock; + memset(&high, 0xFF, sizeof(high)); + high.rm_startblock = irec->rc_startblock + irec->rc_blockcount - 1; + + INIT_LIST_HEAD(&refchk.fragments); + error = xfs_rmap_query_range(sc->sa.rmap_cur, &low, &high, + &xchk_refcountbt_rmap_check, &refchk); + if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur)) + goto out_free; + + xchk_refcountbt_process_rmap_fragments(&refchk); + if (irec->rc_refcount != refchk.seen) + xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0); + +out_free: + list_for_each_entry_safe(frag, n, &refchk.fragments, list) { + list_del(&frag->list); + kmem_free(frag); + } +} + +/* Cross-reference with the other btrees. 
*/ +STATIC void +xchk_refcountbt_xref( + struct xfs_scrub *sc, + const struct xfs_refcount_irec *irec) +{ + if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) + return; + + xchk_xref_is_used_space(sc, irec->rc_startblock, irec->rc_blockcount); + xchk_xref_is_not_inode_chunk(sc, irec->rc_startblock, + irec->rc_blockcount); + xchk_refcountbt_xref_rmap(sc, irec); +} + +/* Scrub a refcountbt record. */ +STATIC int +xchk_refcountbt_rec( + struct xchk_btree *bs, + const union xfs_btree_rec *rec) +{ + struct xfs_refcount_irec irec; + xfs_agblock_t *cow_blocks = bs->private; + struct xfs_perag *pag = bs->cur->bc_ag.pag; + + xfs_refcount_btrec_to_irec(rec, &irec); + + /* Check the domain and refcount are not incompatible. */ + if (!xfs_refcount_check_domain(&irec)) + xchk_btree_set_corrupt(bs->sc, bs->cur, 0); + + if (irec.rc_domain == XFS_REFC_DOMAIN_COW) + (*cow_blocks) += irec.rc_blockcount; + + /* Check the extent. */ + if (!xfs_verify_agbext(pag, irec.rc_startblock, irec.rc_blockcount)) + xchk_btree_set_corrupt(bs->sc, bs->cur, 0); + + if (irec.rc_refcount == 0) + xchk_btree_set_corrupt(bs->sc, bs->cur, 0); + + xchk_refcountbt_xref(bs->sc, &irec); + + return 0; +} + +/* Make sure we have as many refc blocks as the rmap says. */ +STATIC void +xchk_refcount_xref_rmap( + struct xfs_scrub *sc, + xfs_filblks_t cow_blocks) +{ + xfs_extlen_t refcbt_blocks = 0; + xfs_filblks_t blocks; + int error; + + if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm)) + return; + + /* Check that we saw as many refcbt blocks as the rmap knows about. */ + error = xfs_btree_count_blocks(sc->sa.refc_cur, &refcbt_blocks); + if (!xchk_btree_process_error(sc, sc->sa.refc_cur, 0, &error)) + return; + error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, + &XFS_RMAP_OINFO_REFC, &blocks); + if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur)) + return; + if (blocks != refcbt_blocks) + xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0); + + /* Check that we saw as many cow blocks as the rmap knows about. */ + error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, + &XFS_RMAP_OINFO_COW, &blocks); + if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur)) + return; + if (blocks != cow_blocks) + xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0); +} + +/* Scrub the refcount btree for some AG. */ +int +xchk_refcountbt( + struct xfs_scrub *sc) +{ + xfs_agblock_t cow_blocks = 0; + int error; + + error = xchk_btree(sc, sc->sa.refc_cur, xchk_refcountbt_rec, + &XFS_RMAP_OINFO_REFC, &cow_blocks); + if (error) + return error; + + xchk_refcount_xref_rmap(sc, cow_blocks); + + return 0; +} + +/* xref check that a cow staging extent is marked in the refcountbt. */ +void +xchk_xref_is_cow_staging( + struct xfs_scrub *sc, + xfs_agblock_t agbno, + xfs_extlen_t len) +{ + struct xfs_refcount_irec rc; + int has_refcount; + int error; + + if (!sc->sa.refc_cur || xchk_skip_xref(sc->sm)) + return; + + /* Find the CoW staging extent. */ + error = xfs_refcount_lookup_le(sc->sa.refc_cur, XFS_REFC_DOMAIN_COW, + agbno, &has_refcount); + if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur)) + return; + if (!has_refcount) { + xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0); + return; + } + + error = xfs_refcount_get_rec(sc->sa.refc_cur, &rc, &has_refcount); + if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur)) + return; + if (!has_refcount) { + xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0); + return; + } + + /* CoW lookup returned a shared extent record? 
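+ * (Ondisk, the staging/shared distinction is encoded in the high bit of
+ * the record's start block, so an _le lookup keyed for the CoW domain
+ * can land on a shared-domain record when no staging record sorts at or
+ * below the key; hence the explicit domain check.)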
*/ + if (rc.rc_domain != XFS_REFC_DOMAIN_COW) + xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0); + + /* Must be at least as long as what was passed in */ + if (rc.rc_blockcount < len) + xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0); +} + +/* + * xref check that the extent is not shared. Only file data blocks + * can have multiple owners. + */ +void +xchk_xref_is_not_shared( + struct xfs_scrub *sc, + xfs_agblock_t agbno, + xfs_extlen_t len) +{ + bool shared; + int error; + + if (!sc->sa.refc_cur || xchk_skip_xref(sc->sm)) + return; + + error = xfs_refcount_has_record(sc->sa.refc_cur, XFS_REFC_DOMAIN_SHARED, + agbno, len, &shared); + if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur)) + return; + if (shared) + xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0); +} diff --git a/fs/xfs/scrub/repair.c b/fs/xfs/scrub/repair.c new file mode 100644 index 000000000..c18bd039f --- /dev/null +++ b/fs/xfs/scrub/repair.c @@ -0,0 +1,963 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2018 Oracle. All Rights Reserved. + * Author: Darrick J. Wong + */ +#include "xfs.h" +#include "xfs_fs.h" +#include "xfs_shared.h" +#include "xfs_format.h" +#include "xfs_trans_resv.h" +#include "xfs_mount.h" +#include "xfs_btree.h" +#include "xfs_log_format.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_inode.h" +#include "xfs_alloc.h" +#include "xfs_alloc_btree.h" +#include "xfs_ialloc.h" +#include "xfs_ialloc_btree.h" +#include "xfs_rmap.h" +#include "xfs_rmap_btree.h" +#include "xfs_refcount_btree.h" +#include "xfs_extent_busy.h" +#include "xfs_ag.h" +#include "xfs_ag_resv.h" +#include "xfs_quota.h" +#include "xfs_qm.h" +#include "scrub/scrub.h" +#include "scrub/common.h" +#include "scrub/trace.h" +#include "scrub/repair.h" +#include "scrub/bitmap.h" + +/* + * Attempt to repair some metadata, if the metadata is corrupt and userspace + * told us to fix it. This function returns -EAGAIN to mean "re-run scrub", + * and will set *fixed to true if it thinks it repaired anything. + */ +int +xrep_attempt( + struct xfs_scrub *sc) +{ + int error = 0; + + trace_xrep_attempt(XFS_I(file_inode(sc->file)), sc->sm, error); + + xchk_ag_btcur_free(&sc->sa); + + /* Repair whatever's broken. */ + ASSERT(sc->ops->repair); + error = sc->ops->repair(sc); + trace_xrep_done(XFS_I(file_inode(sc->file)), sc->sm, error); + switch (error) { + case 0: + /* + * Repair succeeded. Commit the fixes and perform a second + * scrub so that we can tell userspace if we fixed the problem. + */ + sc->sm->sm_flags &= ~XFS_SCRUB_FLAGS_OUT; + sc->flags |= XREP_ALREADY_FIXED; + return -EAGAIN; + case -EDEADLOCK: + case -EAGAIN: + /* Tell the caller to try again having grabbed all the locks. */ + if (!(sc->flags & XCHK_TRY_HARDER)) { + sc->flags |= XCHK_TRY_HARDER; + return -EAGAIN; + } + /* + * We tried harder but still couldn't grab all the resources + * we needed to fix it. The corruption has not been fixed, + * so report back to userspace. + */ + return -EFSCORRUPTED; + default: + return error; + } +} + +/* + * Complain about unfixable problems in the filesystem. We don't log + * corruptions when IFLAG_REPAIR wasn't set on the assumption that the driver + * program is xfs_scrub, which will call back with IFLAG_REPAIR set if the + * administrator isn't running xfs_scrub in no-repairs mode. + * + * Use this helper function because _ratelimited silently declares a static + * structure to track rate limiting information. 
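+ *
+ * (Each _ratelimited call site declares its own static ratelimit state,
+ * so funneling every repair failure through this one call site gives
+ * them a single shared rate limit rather than one per caller.)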
+ */ +void +xrep_failure( + struct xfs_mount *mp) +{ + xfs_alert_ratelimited(mp, +"Corruption not fixed during online repair. Unmount and run xfs_repair."); +} + +/* + * Repair probe -- userspace uses this to probe if we're willing to repair a + * given mountpoint. + */ +int +xrep_probe( + struct xfs_scrub *sc) +{ + int error = 0; + + if (xchk_should_terminate(sc, &error)) + return error; + + return 0; +} + +/* + * Roll a transaction, keeping the AG headers locked and reinitializing + * the btree cursors. + */ +int +xrep_roll_ag_trans( + struct xfs_scrub *sc) +{ + int error; + + /* Keep the AG header buffers locked so we can keep going. */ + if (sc->sa.agi_bp) + xfs_trans_bhold(sc->tp, sc->sa.agi_bp); + if (sc->sa.agf_bp) + xfs_trans_bhold(sc->tp, sc->sa.agf_bp); + if (sc->sa.agfl_bp) + xfs_trans_bhold(sc->tp, sc->sa.agfl_bp); + + /* + * Roll the transaction. We still own the buffer and the buffer lock + * regardless of whether or not the roll succeeds. If the roll fails, + * the buffers will be released during teardown on our way out of the + * kernel. If it succeeds, we join them to the new transaction and + * move on. + */ + error = xfs_trans_roll(&sc->tp); + if (error) + return error; + + /* Join AG headers to the new transaction. */ + if (sc->sa.agi_bp) + xfs_trans_bjoin(sc->tp, sc->sa.agi_bp); + if (sc->sa.agf_bp) + xfs_trans_bjoin(sc->tp, sc->sa.agf_bp); + if (sc->sa.agfl_bp) + xfs_trans_bjoin(sc->tp, sc->sa.agfl_bp); + + return 0; +} + +/* + * Does the given AG have enough space to rebuild a btree? Neither AG + * reservation can be critical, and we must have enough space (factoring + * in AG reservations) to construct a whole btree. + */ +bool +xrep_ag_has_space( + struct xfs_perag *pag, + xfs_extlen_t nr_blocks, + enum xfs_ag_resv_type type) +{ + return !xfs_ag_resv_critical(pag, XFS_AG_RESV_RMAPBT) && + !xfs_ag_resv_critical(pag, XFS_AG_RESV_METADATA) && + pag->pagf_freeblks > xfs_ag_resv_needed(pag, type) + nr_blocks; +} + +/* + * Figure out how many blocks to reserve for an AG repair. We calculate the + * worst case estimate for the number of blocks we'd need to rebuild one of + * any type of per-AG btree. + */ +xfs_extlen_t +xrep_calc_ag_resblks( + struct xfs_scrub *sc) +{ + struct xfs_mount *mp = sc->mp; + struct xfs_scrub_metadata *sm = sc->sm; + struct xfs_perag *pag; + struct xfs_buf *bp; + xfs_agino_t icount = NULLAGINO; + xfs_extlen_t aglen = NULLAGBLOCK; + xfs_extlen_t usedlen; + xfs_extlen_t freelen; + xfs_extlen_t bnobt_sz; + xfs_extlen_t inobt_sz; + xfs_extlen_t rmapbt_sz; + xfs_extlen_t refcbt_sz; + int error; + + if (!(sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR)) + return 0; + + pag = xfs_perag_get(mp, sm->sm_agno); + if (pag->pagi_init) { + /* Use in-core icount if possible. */ + icount = pag->pagi_count; + } else { + /* Try to get the actual counters from disk. */ + error = xfs_ialloc_read_agi(pag, NULL, &bp); + if (!error) { + icount = pag->pagi_count; + xfs_buf_relse(bp); + } + } + + /* Now grab the block counters from the AGF. */ + error = xfs_alloc_read_agf(pag, NULL, 0, &bp); + if (error) { + aglen = pag->block_count; + freelen = aglen; + usedlen = aglen; + } else { + struct xfs_agf *agf = bp->b_addr; + + aglen = be32_to_cpu(agf->agf_length); + freelen = be32_to_cpu(agf->agf_freeblks); + usedlen = aglen - freelen; + xfs_buf_relse(bp); + } + + /* If the icount is impossible, make some worst-case assumptions. 
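+ * That is, assume that every inode number this AG could possibly
+ * contain is actually allocated.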
*/ + if (icount == NULLAGINO || + !xfs_verify_agino(pag, icount)) { + icount = pag->agino_max - pag->agino_min + 1; + } + + /* If the block counts are impossible, make worst-case assumptions. */ + if (aglen == NULLAGBLOCK || + aglen != pag->block_count || + freelen >= aglen) { + aglen = pag->block_count; + freelen = aglen; + usedlen = aglen; + } + xfs_perag_put(pag); + + trace_xrep_calc_ag_resblks(mp, sm->sm_agno, icount, aglen, + freelen, usedlen); + + /* + * Figure out how many blocks we'd need worst case to rebuild + * each type of btree. Note that we can only rebuild the + * bnobt/cntbt or inobt/finobt as pairs. + */ + bnobt_sz = 2 * xfs_allocbt_calc_size(mp, freelen); + if (xfs_has_sparseinodes(mp)) + inobt_sz = xfs_iallocbt_calc_size(mp, icount / + XFS_INODES_PER_HOLEMASK_BIT); + else + inobt_sz = xfs_iallocbt_calc_size(mp, icount / + XFS_INODES_PER_CHUNK); + if (xfs_has_finobt(mp)) + inobt_sz *= 2; + if (xfs_has_reflink(mp)) + refcbt_sz = xfs_refcountbt_calc_size(mp, usedlen); + else + refcbt_sz = 0; + if (xfs_has_rmapbt(mp)) { + /* + * Guess how many blocks we need to rebuild the rmapbt. + * For non-reflink filesystems we can't have more records than + * used blocks. However, with reflink it's possible to have + * more than one rmap record per AG block. We don't know how + * many rmaps there could be in the AG, so we start off with + * what we hope is an generous over-estimation. + */ + if (xfs_has_reflink(mp)) + rmapbt_sz = xfs_rmapbt_calc_size(mp, + (unsigned long long)aglen * 2); + else + rmapbt_sz = xfs_rmapbt_calc_size(mp, usedlen); + } else { + rmapbt_sz = 0; + } + + trace_xrep_calc_ag_resblks_btsize(mp, sm->sm_agno, bnobt_sz, + inobt_sz, rmapbt_sz, refcbt_sz); + + return max(max(bnobt_sz, inobt_sz), max(rmapbt_sz, refcbt_sz)); +} + +/* Allocate a block in an AG. */ +int +xrep_alloc_ag_block( + struct xfs_scrub *sc, + const struct xfs_owner_info *oinfo, + xfs_fsblock_t *fsbno, + enum xfs_ag_resv_type resv) +{ + struct xfs_alloc_arg args = {0}; + xfs_agblock_t bno; + int error; + + switch (resv) { + case XFS_AG_RESV_AGFL: + case XFS_AG_RESV_RMAPBT: + error = xfs_alloc_get_freelist(sc->sa.pag, sc->tp, + sc->sa.agf_bp, &bno, 1); + if (error) + return error; + if (bno == NULLAGBLOCK) + return -ENOSPC; + xfs_extent_busy_reuse(sc->mp, sc->sa.pag, bno, 1, false); + *fsbno = XFS_AGB_TO_FSB(sc->mp, sc->sa.pag->pag_agno, bno); + if (resv == XFS_AG_RESV_RMAPBT) + xfs_ag_resv_rmapbt_alloc(sc->mp, sc->sa.pag->pag_agno); + return 0; + default: + break; + } + + args.tp = sc->tp; + args.mp = sc->mp; + args.oinfo = *oinfo; + args.fsbno = XFS_AGB_TO_FSB(args.mp, sc->sa.pag->pag_agno, 0); + args.minlen = 1; + args.maxlen = 1; + args.prod = 1; + args.type = XFS_ALLOCTYPE_THIS_AG; + args.resv = resv; + + error = xfs_alloc_vextent(&args); + if (error) + return error; + if (args.fsbno == NULLFSBLOCK) + return -ENOSPC; + ASSERT(args.len == 1); + *fsbno = args.fsbno; + + return 0; +} + +/* Initialize a new AG btree root block with zero entries. 
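+ *
+ * We zero the new block, stamp it with a btree header of the requested
+ * type at level 0 with zero records, tag the buffer for log recovery,
+ * and log the entire block.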
*/ +int +xrep_init_btblock( + struct xfs_scrub *sc, + xfs_fsblock_t fsb, + struct xfs_buf **bpp, + xfs_btnum_t btnum, + const struct xfs_buf_ops *ops) +{ + struct xfs_trans *tp = sc->tp; + struct xfs_mount *mp = sc->mp; + struct xfs_buf *bp; + int error; + + trace_xrep_init_btblock(mp, XFS_FSB_TO_AGNO(mp, fsb), + XFS_FSB_TO_AGBNO(mp, fsb), btnum); + + ASSERT(XFS_FSB_TO_AGNO(mp, fsb) == sc->sa.pag->pag_agno); + error = xfs_trans_get_buf(tp, mp->m_ddev_targp, + XFS_FSB_TO_DADDR(mp, fsb), XFS_FSB_TO_BB(mp, 1), 0, + &bp); + if (error) + return error; + xfs_buf_zero(bp, 0, BBTOB(bp->b_length)); + xfs_btree_init_block(mp, bp, btnum, 0, 0, sc->sa.pag->pag_agno); + xfs_trans_buf_set_type(tp, bp, XFS_BLFT_BTREE_BUF); + xfs_trans_log_buf(tp, bp, 0, BBTOB(bp->b_length) - 1); + bp->b_ops = ops; + *bpp = bp; + + return 0; +} + +/* + * Reconstructing per-AG Btrees + * + * When a space btree is corrupt, we don't bother trying to fix it. Instead, + * we scan secondary space metadata to derive the records that should be in + * the damaged btree, initialize a fresh btree root, and insert the records. + * Note that for rebuilding the rmapbt we scan all the primary data to + * generate the new records. + * + * However, that leaves the matter of removing all the metadata describing the + * old broken structure. For primary metadata we use the rmap data to collect + * every extent with a matching rmap owner (bitmap); we then iterate all other + * metadata structures with the same rmap owner to collect the extents that + * cannot be removed (sublist). We then subtract sublist from bitmap to + * derive the blocks that were used by the old btree. These blocks can be + * reaped. + * + * For rmapbt reconstructions we must use different tactics for extent + * collection. First we iterate all primary metadata (this excludes the old + * rmapbt, obviously) to generate new rmap records. The gaps in the rmap + * records are collected as bitmap. The bnobt records are collected as + * sublist. As with the other btrees we subtract sublist from bitmap, and the + * result (since the rmapbt lives in the free space) are the blocks from the + * old rmapbt. + * + * Disposal of Blocks from Old per-AG Btrees + * + * Now that we've constructed a new btree to replace the damaged one, we want + * to dispose of the blocks that (we think) the old btree was using. + * Previously, we used the rmapbt to collect the extents (bitmap) with the + * rmap owner corresponding to the tree we rebuilt, collected extents for any + * blocks with the same rmap owner that are owned by another data structure + * (sublist), and subtracted sublist from bitmap. In theory the extents + * remaining in bitmap are the old btree's blocks. + * + * Unfortunately, it's possible that the btree was crosslinked with other + * blocks on disk. The rmap data can tell us if there are multiple owners, so + * if the rmapbt says there is an owner of this block other than @oinfo, then + * the block is crosslinked. Remove the reverse mapping and continue. + * + * If there is one rmap record, we can free the block, which removes the + * reverse mapping but doesn't add the block to the free space. Our repair + * strategy is to hope the other metadata objects crosslinked on this block + * will be rebuilt (atop different blocks), thereby removing all the cross + * links. + * + * If there are no rmap records at all, we also free the block. 
If the btree + * being rebuilt lives in the free space (bnobt/cntbt/rmapbt) then there isn't + * supposed to be a rmap record and everything is ok. For other btrees there + * had to have been an rmap entry for the block to have ended up on @bitmap, + * so if it's gone now there's something wrong and the fs will shut down. + * + * Note: If there are multiple rmap records with only the same rmap owner as + * the btree we're trying to rebuild and the block is indeed owned by another + * data structure with the same rmap owner, then the block will be in sublist + * and therefore doesn't need disposal. If there are multiple rmap records + * with only the same rmap owner but the block is not owned by something with + * the same rmap owner, the block will be freed. + * + * The caller is responsible for locking the AG headers for the entire rebuild + * operation so that nothing else can sneak in and change the AG state while + * we're not looking. We also assume that the caller already invalidated any + * buffers associated with @bitmap. + */ + +/* + * Invalidate buffers for per-AG btree blocks we're dumping. This function + * is not intended for use with file data repairs; we have bunmapi for that. + */ +int +xrep_invalidate_blocks( + struct xfs_scrub *sc, + struct xbitmap *bitmap) +{ + struct xbitmap_range *bmr; + struct xbitmap_range *n; + struct xfs_buf *bp; + xfs_fsblock_t fsbno; + + /* + * For each block in each extent, see if there's an incore buffer for + * exactly that block; if so, invalidate it. The buffer cache only + * lets us look for one buffer at a time, so we have to look one block + * at a time. Avoid invalidating AG headers and post-EOFS blocks + * because we never own those; and if we can't TRYLOCK the buffer we + * assume it's owned by someone else. + */ + for_each_xbitmap_block(fsbno, bmr, n, bitmap) { + int error; + + /* Skip AG headers and post-EOFS blocks */ + if (!xfs_verify_fsbno(sc->mp, fsbno)) + continue; + error = xfs_buf_incore(sc->mp->m_ddev_targp, + XFS_FSB_TO_DADDR(sc->mp, fsbno), + XFS_FSB_TO_BB(sc->mp, 1), XBF_TRYLOCK, &bp); + if (error) + continue; + + xfs_trans_bjoin(sc->tp, bp); + xfs_trans_binval(sc->tp, bp); + } + + return 0; +} + +/* Ensure the freelist is the correct size. */ +int +xrep_fix_freelist( + struct xfs_scrub *sc, + bool can_shrink) +{ + struct xfs_alloc_arg args = {0}; + + args.mp = sc->mp; + args.tp = sc->tp; + args.agno = sc->sa.pag->pag_agno; + args.alignment = 1; + args.pag = sc->sa.pag; + + return xfs_alloc_fix_freelist(&args, + can_shrink ? 0 : XFS_ALLOC_FLAG_NOSHRINK); +} + +/* + * Put a block back on the AGFL. + */ +STATIC int +xrep_put_freelist( + struct xfs_scrub *sc, + xfs_agblock_t agbno) +{ + int error; + + /* Make sure there's space on the freelist. */ + error = xrep_fix_freelist(sc, true); + if (error) + return error; + + /* + * Since we're "freeing" a lost block onto the AGFL, we have to + * create an rmap for the block prior to merging it or else other + * parts will break. + */ + error = xfs_rmap_alloc(sc->tp, sc->sa.agf_bp, sc->sa.pag, agbno, 1, + &XFS_RMAP_OINFO_AG); + if (error) + return error; + + /* Put the block on the AGFL. */ + error = xfs_alloc_put_freelist(sc->sa.pag, sc->tp, sc->sa.agf_bp, + sc->sa.agfl_bp, agbno, 0); + if (error) + return error; + xfs_extent_busy_insert(sc->tp, sc->sa.pag, agbno, 1, + XFS_EXTENT_BUSY_SKIP_DISCARD); + + return 0; +} + +/* Dispose of a single block. 
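+ *
+ * In short: if the rmapbt shows other owners, the block is crosslinked
+ * and we only delete our reverse mapping; if the block came from an
+ * AGFL-type reservation, it goes back on the freelist; otherwise the
+ * extent is freed outright.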
*/ +STATIC int +xrep_reap_block( + struct xfs_scrub *sc, + xfs_fsblock_t fsbno, + const struct xfs_owner_info *oinfo, + enum xfs_ag_resv_type resv) +{ + struct xfs_btree_cur *cur; + struct xfs_buf *agf_bp = NULL; + xfs_agblock_t agbno; + bool has_other_rmap; + int error; + + agbno = XFS_FSB_TO_AGBNO(sc->mp, fsbno); + ASSERT(XFS_FSB_TO_AGNO(sc->mp, fsbno) == sc->sa.pag->pag_agno); + + /* + * If we are repairing per-inode metadata, we need to read in the AGF + * buffer. Otherwise, we're repairing a per-AG structure, so reuse + * the AGF buffer that the setup functions already grabbed. + */ + if (sc->ip) { + error = xfs_alloc_read_agf(sc->sa.pag, sc->tp, 0, &agf_bp); + if (error) + return error; + } else { + agf_bp = sc->sa.agf_bp; + } + cur = xfs_rmapbt_init_cursor(sc->mp, sc->tp, agf_bp, sc->sa.pag); + + /* Can we find any other rmappings? */ + error = xfs_rmap_has_other_keys(cur, agbno, 1, oinfo, &has_other_rmap); + xfs_btree_del_cursor(cur, error); + if (error) + goto out_free; + + /* + * If there are other rmappings, this block is cross linked and must + * not be freed. Remove the reverse mapping and move on. Otherwise, + * we were the only owner of the block, so free the extent, which will + * also remove the rmap. + * + * XXX: XFS doesn't support detecting the case where a single block + * metadata structure is crosslinked with a multi-block structure + * because the buffer cache doesn't detect aliasing problems, so we + * can't fix 100% of crosslinking problems (yet). The verifiers will + * blow on writeout, the filesystem will shut down, and the admin gets + * to run xfs_repair. + */ + if (has_other_rmap) + error = xfs_rmap_free(sc->tp, agf_bp, sc->sa.pag, agbno, + 1, oinfo); + else if (resv == XFS_AG_RESV_AGFL) + error = xrep_put_freelist(sc, agbno); + else + error = xfs_free_extent(sc->tp, fsbno, 1, oinfo, resv); + if (agf_bp != sc->sa.agf_bp) + xfs_trans_brelse(sc->tp, agf_bp); + if (error) + return error; + + if (sc->ip) + return xfs_trans_roll_inode(&sc->tp, sc->ip); + return xrep_roll_ag_trans(sc); + +out_free: + if (agf_bp != sc->sa.agf_bp) + xfs_trans_brelse(sc->tp, agf_bp); + return error; +} + +/* Dispose of every block of every extent in the bitmap. */ +int +xrep_reap_extents( + struct xfs_scrub *sc, + struct xbitmap *bitmap, + const struct xfs_owner_info *oinfo, + enum xfs_ag_resv_type type) +{ + struct xbitmap_range *bmr; + struct xbitmap_range *n; + xfs_fsblock_t fsbno; + int error = 0; + + ASSERT(xfs_has_rmapbt(sc->mp)); + + for_each_xbitmap_block(fsbno, bmr, n, bitmap) { + ASSERT(sc->ip != NULL || + XFS_FSB_TO_AGNO(sc->mp, fsbno) == sc->sa.pag->pag_agno); + trace_xrep_dispose_btree_extent(sc->mp, + XFS_FSB_TO_AGNO(sc->mp, fsbno), + XFS_FSB_TO_AGBNO(sc->mp, fsbno), 1); + + error = xrep_reap_block(sc, fsbno, oinfo, type); + if (error) + break; + } + + return error; +} + +/* + * Finding per-AG Btree Roots for AGF/AGI Reconstruction + * + * If the AGF or AGI become slightly corrupted, it may be necessary to rebuild + * the AG headers by using the rmap data to rummage through the AG looking for + * btree roots. This is not guaranteed to work if the AG is heavily damaged + * or the rmap data are corrupt. + * + * Callers of xrep_find_ag_btree_roots must lock the AGF and AGFL + * buffers if the AGF is being rebuilt; or the AGF and AGI buffers if the + * AGI is being rebuilt. It must maintain these locks until it's safe for + * other threads to change the btrees' shapes. 
The caller provides + * information about the btrees to look for by passing in an array of + * xrep_find_ag_btree with the (rmap owner, buf_ops, magic) fields set. + * The (root, height) fields will be set on return if anything is found. The + * last element of the array should have a NULL buf_ops to mark the end of the + * array. + * + * For every rmapbt record matching any of the rmap owners in btree_info, + * read each block referenced by the rmap record. If the block is a btree + * block from this filesystem matching any of the magic numbers and has a + * level higher than what we've already seen, remember the block and the + * height of the tree required to have such a block. When the call completes, + * we return the highest block we've found for each btree description; those + * should be the roots. + */ + +struct xrep_findroot { + struct xfs_scrub *sc; + struct xfs_buf *agfl_bp; + struct xfs_agf *agf; + struct xrep_find_ag_btree *btree_info; +}; + +/* See if our block is in the AGFL. */ +STATIC int +xrep_findroot_agfl_walk( + struct xfs_mount *mp, + xfs_agblock_t bno, + void *priv) +{ + xfs_agblock_t *agbno = priv; + + return (*agbno == bno) ? -ECANCELED : 0; +} + +/* Does this block match the btree information passed in? */ +STATIC int +xrep_findroot_block( + struct xrep_findroot *ri, + struct xrep_find_ag_btree *fab, + uint64_t owner, + xfs_agblock_t agbno, + bool *done_with_block) +{ + struct xfs_mount *mp = ri->sc->mp; + struct xfs_buf *bp; + struct xfs_btree_block *btblock; + xfs_daddr_t daddr; + int block_level; + int error = 0; + + daddr = XFS_AGB_TO_DADDR(mp, ri->sc->sa.pag->pag_agno, agbno); + + /* + * Blocks in the AGFL have stale contents that might just happen to + * have a matching magic and uuid. We don't want to pull these blocks + * in as part of a tree root, so we have to filter out the AGFL stuff + * here. If the AGFL looks insane we'll just refuse to repair. + */ + if (owner == XFS_RMAP_OWN_AG) { + error = xfs_agfl_walk(mp, ri->agf, ri->agfl_bp, + xrep_findroot_agfl_walk, &agbno); + if (error == -ECANCELED) + return 0; + if (error) + return error; + } + + /* + * Read the buffer into memory so that we can see if it's a match for + * our btree type. We have no clue if it is beforehand, and we want to + * avoid xfs_trans_read_buf's behavior of dumping the DONE state (which + * will cause needless disk reads in subsequent calls to this function) + * and logging metadata verifier failures. + * + * Therefore, pass in NULL buffer ops. If the buffer was already in + * memory from some other caller it will already have b_ops assigned. + * If it was in memory from a previous unsuccessful findroot_block + * call, the buffer won't have b_ops but it should be clean and ready + * for us to try to verify if the read call succeeds. The same applies + * if the buffer wasn't in memory at all. + * + * Note: If we never match a btree type with this buffer, it will be + * left in memory with NULL b_ops. This shouldn't be a problem unless + * the buffer gets written. + */ + error = xfs_trans_read_buf(mp, ri->sc->tp, mp->m_ddev_targp, daddr, + mp->m_bsize, 0, &bp, NULL); + if (error) + return error; + + /* Ensure the block magic matches the btree type we're looking for. */ + btblock = XFS_BUF_TO_BLOCK(bp); + ASSERT(fab->buf_ops->magic[1] != 0); + if (btblock->bb_magic != fab->buf_ops->magic[1]) + goto out; + + /* + * If the buffer already has ops applied and they're not the ones for + * this btree type, we know this block doesn't match the btree and we + * can bail out. 
+ * + * If the buffer ops match ours, someone else has already validated + * the block for us, so we can move on to checking if this is a root + * block candidate. + * + * If the buffer does not have ops, nobody has successfully validated + * the contents and the buffer cannot be dirty. If the magic, uuid, + * and structure match this btree type then we'll move on to checking + * if it's a root block candidate. If there is no match, bail out. + */ + if (bp->b_ops) { + if (bp->b_ops != fab->buf_ops) + goto out; + } else { + ASSERT(!xfs_trans_buf_is_dirty(bp)); + if (!uuid_equal(&btblock->bb_u.s.bb_uuid, + &mp->m_sb.sb_meta_uuid)) + goto out; + /* + * Read verifiers can reference b_ops, so we set the pointer + * here. If the verifier fails we'll reset the buffer state + * to what it was before we touched the buffer. + */ + bp->b_ops = fab->buf_ops; + fab->buf_ops->verify_read(bp); + if (bp->b_error) { + bp->b_ops = NULL; + bp->b_error = 0; + goto out; + } + + /* + * Some read verifiers will (re)set b_ops, so we must be + * careful not to change b_ops after running the verifier. + */ + } + + /* + * This block passes the magic/uuid and verifier tests for this btree + * type. We don't need the caller to try the other tree types. + */ + *done_with_block = true; + + /* + * Compare this btree block's level to the height of the current + * candidate root block. + * + * If the level matches the root we found previously, throw away both + * blocks because there can't be two candidate roots. + * + * If level is lower in the tree than the root we found previously, + * ignore this block. + */ + block_level = xfs_btree_get_level(btblock); + if (block_level + 1 == fab->height) { + fab->root = NULLAGBLOCK; + goto out; + } else if (block_level < fab->height) { + goto out; + } + + /* + * This is the highest block in the tree that we've found so far. + * Update the btree height to reflect what we've learned from this + * block. + */ + fab->height = block_level + 1; + + /* + * If this block doesn't have sibling pointers, then it's the new root + * block candidate. Otherwise, the root will be found farther up the + * tree. + */ + if (btblock->bb_u.s.bb_leftsib == cpu_to_be32(NULLAGBLOCK) && + btblock->bb_u.s.bb_rightsib == cpu_to_be32(NULLAGBLOCK)) + fab->root = agbno; + else + fab->root = NULLAGBLOCK; + + trace_xrep_findroot_block(mp, ri->sc->sa.pag->pag_agno, agbno, + be32_to_cpu(btblock->bb_magic), fab->height - 1); +out: + xfs_trans_brelse(ri->sc->tp, bp); + return error; +} + +/* + * Do any of the blocks in this rmap record match one of the btrees we're + * looking for? + */ +STATIC int +xrep_findroot_rmap( + struct xfs_btree_cur *cur, + const struct xfs_rmap_irec *rec, + void *priv) +{ + struct xrep_findroot *ri = priv; + struct xrep_find_ag_btree *fab; + xfs_agblock_t b; + bool done; + int error = 0; + + /* Ignore anything that isn't AG metadata. */ + if (!XFS_RMAP_NON_INODE_OWNER(rec->rm_owner)) + return 0; + + /* Otherwise scan each block + btree type. */ + for (b = 0; b < rec->rm_blockcount; b++) { + done = false; + for (fab = ri->btree_info; fab->buf_ops; fab++) { + if (rec->rm_owner != fab->rmap_owner) + continue; + error = xrep_findroot_block(ri, fab, + rec->rm_owner, rec->rm_startblock + b, + &done); + if (error) + return error; + if (done) + break; + } + } + + return 0; +} + +/* Find the roots of the per-AG btrees described in btree_info. 
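*/

/*
 * Editorial illustration, not part of this patch: an AGI rebuild might
 * describe its target btrees like so (the buf_ops symbols are assumed to
 * come from the inode btree code) and hand the array to the finder below:
 *
 *	struct xrep_find_ag_btree fab[] = {
 *		{ .rmap_owner = XFS_RMAP_OWN_INOBT,
 *		  .buf_ops = &xfs_inobt_buf_ops },
 *		{ .rmap_owner = XFS_RMAP_OWN_INOBT,
 *		  .buf_ops = &xfs_finobt_buf_ops },
 *		{ .buf_ops = NULL },	// sentinel terminating the array
 *	};
 *
 *	error = xrep_find_ag_btree_roots(sc, agf_bp, fab, agfl_bp);
 *	// on success, fab[i].root and fab[i].height describe each root
 */

/* Find the roots of the per-AG btrees described in btree_info: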
*/ +int +xrep_find_ag_btree_roots( + struct xfs_scrub *sc, + struct xfs_buf *agf_bp, + struct xrep_find_ag_btree *btree_info, + struct xfs_buf *agfl_bp) +{ + struct xfs_mount *mp = sc->mp; + struct xrep_findroot ri; + struct xrep_find_ag_btree *fab; + struct xfs_btree_cur *cur; + int error; + + ASSERT(xfs_buf_islocked(agf_bp)); + ASSERT(agfl_bp == NULL || xfs_buf_islocked(agfl_bp)); + + ri.sc = sc; + ri.btree_info = btree_info; + ri.agf = agf_bp->b_addr; + ri.agfl_bp = agfl_bp; + for (fab = btree_info; fab->buf_ops; fab++) { + ASSERT(agfl_bp || fab->rmap_owner != XFS_RMAP_OWN_AG); + ASSERT(XFS_RMAP_NON_INODE_OWNER(fab->rmap_owner)); + fab->root = NULLAGBLOCK; + fab->height = 0; + } + + cur = xfs_rmapbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.pag); + error = xfs_rmap_query_all(cur, xrep_findroot_rmap, &ri); + xfs_btree_del_cursor(cur, error); + + return error; +} + +/* Force a quotacheck the next time we mount. */ +void +xrep_force_quotacheck( + struct xfs_scrub *sc, + xfs_dqtype_t type) +{ + uint flag; + + flag = xfs_quota_chkd_flag(type); + if (!(flag & sc->mp->m_qflags)) + return; + + mutex_lock(&sc->mp->m_quotainfo->qi_quotaofflock); + sc->mp->m_qflags &= ~flag; + spin_lock(&sc->mp->m_sb_lock); + sc->mp->m_sb.sb_qflags &= ~flag; + spin_unlock(&sc->mp->m_sb_lock); + xfs_log_sb(sc->tp); + mutex_unlock(&sc->mp->m_quotainfo->qi_quotaofflock); +} + +/* + * Attach dquots to this inode, or schedule quotacheck to fix them. + * + * This function ensures that the appropriate dquots are attached to an inode. + * We cannot allow the dquot code to allocate an on-disk dquot block here + * because we're already in transaction context with the inode locked. The + * on-disk dquot should already exist anyway. If the quota code signals + * corruption or missing quota information, schedule quotacheck, which will + * repair corruptions in the quota metadata. + */ +int +xrep_ino_dqattach( + struct xfs_scrub *sc) +{ + int error; + + error = xfs_qm_dqattach_locked(sc->ip, false); + switch (error) { + case -EFSBADCRC: + case -EFSCORRUPTED: + case -ENOENT: + xfs_err_ratelimited(sc->mp, +"inode %llu repair encountered quota error %d, quotacheck forced.", + (unsigned long long)sc->ip->i_ino, error); + if (XFS_IS_UQUOTA_ON(sc->mp) && !sc->ip->i_udquot) + xrep_force_quotacheck(sc, XFS_DQTYPE_USER); + if (XFS_IS_GQUOTA_ON(sc->mp) && !sc->ip->i_gdquot) + xrep_force_quotacheck(sc, XFS_DQTYPE_GROUP); + if (XFS_IS_PQUOTA_ON(sc->mp) && !sc->ip->i_pdquot) + xrep_force_quotacheck(sc, XFS_DQTYPE_PROJ); + fallthrough; + case -ESRCH: + error = 0; + break; + default: + break; + } + + return error; +} diff --git a/fs/xfs/scrub/repair.h b/fs/xfs/scrub/repair.h new file mode 100644 index 000000000..840f74ec4 --- /dev/null +++ b/fs/xfs/scrub/repair.h @@ -0,0 +1,94 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2018 Oracle. All Rights Reserved. + * Author: Darrick J. 
Wong + */ +#ifndef __XFS_SCRUB_REPAIR_H__ +#define __XFS_SCRUB_REPAIR_H__ + +#include "xfs_quota_defs.h" + +static inline int xrep_notsupported(struct xfs_scrub *sc) +{ + return -EOPNOTSUPP; +} + +#ifdef CONFIG_XFS_ONLINE_REPAIR + +/* Repair helpers */ + +int xrep_attempt(struct xfs_scrub *sc); +void xrep_failure(struct xfs_mount *mp); +int xrep_roll_ag_trans(struct xfs_scrub *sc); +bool xrep_ag_has_space(struct xfs_perag *pag, xfs_extlen_t nr_blocks, + enum xfs_ag_resv_type type); +xfs_extlen_t xrep_calc_ag_resblks(struct xfs_scrub *sc); +int xrep_alloc_ag_block(struct xfs_scrub *sc, + const struct xfs_owner_info *oinfo, xfs_fsblock_t *fsbno, + enum xfs_ag_resv_type resv); +int xrep_init_btblock(struct xfs_scrub *sc, xfs_fsblock_t fsb, + struct xfs_buf **bpp, xfs_btnum_t btnum, + const struct xfs_buf_ops *ops); + +struct xbitmap; + +int xrep_fix_freelist(struct xfs_scrub *sc, bool can_shrink); +int xrep_invalidate_blocks(struct xfs_scrub *sc, struct xbitmap *btlist); +int xrep_reap_extents(struct xfs_scrub *sc, struct xbitmap *exlist, + const struct xfs_owner_info *oinfo, enum xfs_ag_resv_type type); + +struct xrep_find_ag_btree { + /* in: rmap owner of the btree we're looking for */ + uint64_t rmap_owner; + + /* in: buffer ops */ + const struct xfs_buf_ops *buf_ops; + + /* in: maximum btree height */ + unsigned int maxlevels; + + /* out: the highest btree block found and the tree height */ + xfs_agblock_t root; + unsigned int height; +}; + +int xrep_find_ag_btree_roots(struct xfs_scrub *sc, struct xfs_buf *agf_bp, + struct xrep_find_ag_btree *btree_info, struct xfs_buf *agfl_bp); +void xrep_force_quotacheck(struct xfs_scrub *sc, xfs_dqtype_t type); +int xrep_ino_dqattach(struct xfs_scrub *sc); + +/* Metadata repairers */ + +int xrep_probe(struct xfs_scrub *sc); +int xrep_superblock(struct xfs_scrub *sc); +int xrep_agf(struct xfs_scrub *sc); +int xrep_agfl(struct xfs_scrub *sc); +int xrep_agi(struct xfs_scrub *sc); + +#else + +static inline int +xrep_attempt( + struct xfs_scrub *sc) +{ + return -EOPNOTSUPP; +} + +static inline void xrep_failure(struct xfs_mount *mp) {} + +static inline xfs_extlen_t +xrep_calc_ag_resblks( + struct xfs_scrub *sc) +{ + return 0; +} + +#define xrep_probe xrep_notsupported +#define xrep_superblock xrep_notsupported +#define xrep_agf xrep_notsupported +#define xrep_agfl xrep_notsupported +#define xrep_agi xrep_notsupported + +#endif /* CONFIG_XFS_ONLINE_REPAIR */ + +#endif /* __XFS_SCRUB_REPAIR_H__ */ diff --git a/fs/xfs/scrub/rmap.c b/fs/xfs/scrub/rmap.c new file mode 100644 index 000000000..229826b2e --- /dev/null +++ b/fs/xfs/scrub/rmap.c @@ -0,0 +1,235 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2017 Oracle. All Rights Reserved. + * Author: Darrick J. Wong + */ +#include "xfs.h" +#include "xfs_fs.h" +#include "xfs_shared.h" +#include "xfs_format.h" +#include "xfs_trans_resv.h" +#include "xfs_mount.h" +#include "xfs_btree.h" +#include "xfs_rmap.h" +#include "xfs_refcount.h" +#include "scrub/scrub.h" +#include "scrub/common.h" +#include "scrub/btree.h" +#include "xfs_ag.h" + +/* + * Set us up to scrub reverse mapping btrees. + */ +int +xchk_setup_ag_rmapbt( + struct xfs_scrub *sc) +{ + return xchk_setup_ag_btree(sc, false); +} + +/* Reverse-mapping scrubber. */ + +/* Cross-reference a rmap against the refcount btree. 
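*/

/*
 * Editorial note: reflink can only share written data-fork blocks, so if
 * the refcount btree reports an extent as shared, an rmap that describes
 * that extent as a bmbt block, an attr-fork block, an unwritten extent, or
 * a non-inode owner contradicts it. That is the rule encoded below.
 */

/* Cross-reference a rmap against the refcount btree, as follows: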
*/ +STATIC void +xchk_rmapbt_xref_refc( + struct xfs_scrub *sc, + struct xfs_rmap_irec *irec) +{ + xfs_agblock_t fbno; + xfs_extlen_t flen; + bool non_inode; + bool is_bmbt; + bool is_attr; + bool is_unwritten; + int error; + + if (!sc->sa.refc_cur || xchk_skip_xref(sc->sm)) + return; + + non_inode = XFS_RMAP_NON_INODE_OWNER(irec->rm_owner); + is_bmbt = irec->rm_flags & XFS_RMAP_BMBT_BLOCK; + is_attr = irec->rm_flags & XFS_RMAP_ATTR_FORK; + is_unwritten = irec->rm_flags & XFS_RMAP_UNWRITTEN; + + /* If this is shared, must be a data fork extent. */ + error = xfs_refcount_find_shared(sc->sa.refc_cur, irec->rm_startblock, + irec->rm_blockcount, &fbno, &flen, false); + if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur)) + return; + if (flen != 0 && (non_inode || is_attr || is_bmbt || is_unwritten)) + xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0); +} + +/* Cross-reference with the other btrees. */ +STATIC void +xchk_rmapbt_xref( + struct xfs_scrub *sc, + struct xfs_rmap_irec *irec) +{ + xfs_agblock_t agbno = irec->rm_startblock; + xfs_extlen_t len = irec->rm_blockcount; + + if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) + return; + + xchk_xref_is_used_space(sc, agbno, len); + if (irec->rm_owner == XFS_RMAP_OWN_INODES) + xchk_xref_is_inode_chunk(sc, agbno, len); + else + xchk_xref_is_not_inode_chunk(sc, agbno, len); + if (irec->rm_owner == XFS_RMAP_OWN_COW) + xchk_xref_is_cow_staging(sc, irec->rm_startblock, + irec->rm_blockcount); + else + xchk_rmapbt_xref_refc(sc, irec); +} + +/* Scrub an rmapbt record. */ +STATIC int +xchk_rmapbt_rec( + struct xchk_btree *bs, + const union xfs_btree_rec *rec) +{ + struct xfs_mount *mp = bs->cur->bc_mp; + struct xfs_rmap_irec irec; + struct xfs_perag *pag = bs->cur->bc_ag.pag; + bool non_inode; + bool is_unwritten; + bool is_bmbt; + bool is_attr; + int error; + + error = xfs_rmap_btrec_to_irec(rec, &irec); + if (!xchk_btree_process_error(bs->sc, bs->cur, 0, &error)) + goto out; + + /* Check extent. */ + if (irec.rm_startblock + irec.rm_blockcount <= irec.rm_startblock) + xchk_btree_set_corrupt(bs->sc, bs->cur, 0); + + if (irec.rm_owner == XFS_RMAP_OWN_FS) { + /* + * xfs_verify_agbno returns false for static fs metadata. + * Since that only exists at the start of the AG, validate + * that by hand. + */ + if (irec.rm_startblock != 0 || + irec.rm_blockcount != XFS_AGFL_BLOCK(mp) + 1) + xchk_btree_set_corrupt(bs->sc, bs->cur, 0); + } else { + /* + * Otherwise we must point somewhere past the static metadata + * but before the end of the FS. Run the regular check. + */ + if (!xfs_verify_agbno(pag, irec.rm_startblock) || + !xfs_verify_agbno(pag, irec.rm_startblock + + irec.rm_blockcount - 1)) + xchk_btree_set_corrupt(bs->sc, bs->cur, 0); + } + + /* Check flags. */ + non_inode = XFS_RMAP_NON_INODE_OWNER(irec.rm_owner); + is_bmbt = irec.rm_flags & XFS_RMAP_BMBT_BLOCK; + is_attr = irec.rm_flags & XFS_RMAP_ATTR_FORK; + is_unwritten = irec.rm_flags & XFS_RMAP_UNWRITTEN; + + if (is_bmbt && irec.rm_offset != 0) + xchk_btree_set_corrupt(bs->sc, bs->cur, 0); + + if (non_inode && irec.rm_offset != 0) + xchk_btree_set_corrupt(bs->sc, bs->cur, 0); + + if (is_unwritten && (is_bmbt || non_inode || is_attr)) + xchk_btree_set_corrupt(bs->sc, bs->cur, 0); + + if (non_inode && (is_bmbt || is_unwritten || is_attr)) + xchk_btree_set_corrupt(bs->sc, bs->cur, 0); + + if (!non_inode) { + if (!xfs_verify_ino(mp, irec.rm_owner)) + xchk_btree_set_corrupt(bs->sc, bs->cur, 0); + } else { + /* Non-inode owner within the magic values? 
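 * (Editorial note: the special owners occupy a small reserved band at the
 * top of the u64 space; assuming the usual constant layout, the test below
 * admits everything from XFS_RMAP_OWN_FS down to, but not including,
 * XFS_RMAP_OWN_MIN, rejecting the OWN_NULL/OWN_UNKNOWN sentinels.)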
*/ + if (irec.rm_owner <= XFS_RMAP_OWN_MIN || + irec.rm_owner > XFS_RMAP_OWN_FS) + xchk_btree_set_corrupt(bs->sc, bs->cur, 0); + } + + xchk_rmapbt_xref(bs->sc, &irec); +out: + return error; +} + +/* Scrub the rmap btree for some AG. */ +int +xchk_rmapbt( + struct xfs_scrub *sc) +{ + return xchk_btree(sc, sc->sa.rmap_cur, xchk_rmapbt_rec, + &XFS_RMAP_OINFO_AG, NULL); +} + +/* xref check that the extent is owned by a given owner */ +static inline void +xchk_xref_check_owner( + struct xfs_scrub *sc, + xfs_agblock_t bno, + xfs_extlen_t len, + const struct xfs_owner_info *oinfo, + bool should_have_rmap) +{ + bool has_rmap; + int error; + + if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm)) + return; + + error = xfs_rmap_record_exists(sc->sa.rmap_cur, bno, len, oinfo, + &has_rmap); + if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur)) + return; + if (has_rmap != should_have_rmap) + xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0); +} + +/* xref check that the extent is owned by a given owner */ +void +xchk_xref_is_owned_by( + struct xfs_scrub *sc, + xfs_agblock_t bno, + xfs_extlen_t len, + const struct xfs_owner_info *oinfo) +{ + xchk_xref_check_owner(sc, bno, len, oinfo, true); +} + +/* xref check that the extent is not owned by a given owner */ +void +xchk_xref_is_not_owned_by( + struct xfs_scrub *sc, + xfs_agblock_t bno, + xfs_extlen_t len, + const struct xfs_owner_info *oinfo) +{ + xchk_xref_check_owner(sc, bno, len, oinfo, false); +} + +/* xref check that the extent has no reverse mapping at all */ +void +xchk_xref_has_no_owner( + struct xfs_scrub *sc, + xfs_agblock_t bno, + xfs_extlen_t len) +{ + bool has_rmap; + int error; + + if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm)) + return; + + error = xfs_rmap_has_record(sc->sa.rmap_cur, bno, len, &has_rmap); + if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur)) + return; + if (has_rmap) + xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0); +} diff --git a/fs/xfs/scrub/rtbitmap.c b/fs/xfs/scrub/rtbitmap.c new file mode 100644 index 000000000..0a3bde64c --- /dev/null +++ b/fs/xfs/scrub/rtbitmap.c @@ -0,0 +1,193 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2017 Oracle. All Rights Reserved. + * Author: Darrick J. Wong + */ +#include "xfs.h" +#include "xfs_fs.h" +#include "xfs_shared.h" +#include "xfs_format.h" +#include "xfs_trans_resv.h" +#include "xfs_mount.h" +#include "xfs_log_format.h" +#include "xfs_trans.h" +#include "xfs_rtalloc.h" +#include "xfs_inode.h" +#include "xfs_bmap.h" +#include "scrub/scrub.h" +#include "scrub/common.h" + +/* Set us up with the realtime metadata locked. */ +int +xchk_setup_rt( + struct xfs_scrub *sc) +{ + int error; + + error = xchk_setup_fs(sc); + if (error) + return error; + + sc->ilock_flags = XFS_ILOCK_EXCL | XFS_ILOCK_RTBITMAP; + sc->ip = sc->mp->m_rbmip; + xfs_ilock(sc->ip, sc->ilock_flags); + + return 0; +} + +/* Realtime bitmap. */ + +/* Scrub a free extent record from the realtime bitmap. */ +STATIC int +xchk_rtbitmap_rec( + struct xfs_mount *mp, + struct xfs_trans *tp, + const struct xfs_rtalloc_rec *rec, + void *priv) +{ + struct xfs_scrub *sc = priv; + xfs_rtblock_t startblock; + xfs_rtblock_t blockcount; + + startblock = rec->ar_startext * mp->m_sb.sb_rextsize; + blockcount = rec->ar_extcount * mp->m_sb.sb_rextsize; + + if (!xfs_verify_rtext(mp, startblock, blockcount)) + xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0); + return 0; +} + +/* Make sure the entire rtbitmap file is mapped with written extents. 
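*/

/*
 * Editorial worked example: with sb_rextsize = 4 blocks, a free-extent
 * record { ar_startext = 10, ar_extcount = 3 } expands to
 *
 *	startblock = 10 * 4 = 40
 *	blockcount =  3 * 4 = 12
 *
 * so xfs_verify_rtext() must accept rtblocks [40, 52) for the record to
 * pass.
 */

/* Make sure the entire rtbitmap file is mapped with written extents: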
*/ +STATIC int +xchk_rtbitmap_check_extents( + struct xfs_scrub *sc) +{ + struct xfs_mount *mp = sc->mp; + struct xfs_bmbt_irec map; + xfs_rtblock_t off; + int nmap; + int error = 0; + + for (off = 0; off < mp->m_sb.sb_rbmblocks;) { + if (xchk_should_terminate(sc, &error) || + (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)) + break; + + /* Make sure we have a written extent. */ + nmap = 1; + error = xfs_bmapi_read(mp->m_rbmip, off, + mp->m_sb.sb_rbmblocks - off, &map, &nmap, + XFS_DATA_FORK); + if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, off, &error)) + break; + + if (nmap != 1 || !xfs_bmap_is_written_extent(&map)) { + xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, off); + break; + } + + off += map.br_blockcount; + } + + return error; +} + +/* Scrub the realtime bitmap. */ +int +xchk_rtbitmap( + struct xfs_scrub *sc) +{ + int error; + + /* Is the size of the rtbitmap correct? */ + if (sc->mp->m_rbmip->i_disk_size != + XFS_FSB_TO_B(sc->mp, sc->mp->m_sb.sb_rbmblocks)) { + xchk_ino_set_corrupt(sc, sc->mp->m_rbmip->i_ino); + return 0; + } + + /* Invoke the fork scrubber. */ + error = xchk_metadata_inode_forks(sc); + if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)) + return error; + + error = xchk_rtbitmap_check_extents(sc); + if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)) + return error; + + error = xfs_rtalloc_query_all(sc->mp, sc->tp, xchk_rtbitmap_rec, sc); + if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, 0, &error)) + goto out; + +out: + return error; +} + +/* Scrub the realtime summary. */ +int +xchk_rtsummary( + struct xfs_scrub *sc) +{ + struct xfs_inode *rsumip = sc->mp->m_rsumip; + struct xfs_inode *old_ip = sc->ip; + uint old_ilock_flags = sc->ilock_flags; + int error = 0; + + /* + * We ILOCK'd the rt bitmap ip in the setup routine, now lock the + * rt summary ip in compliance with the rt inode locking rules. + * + * Since we switch sc->ip to rsumip we have to save the old ilock + * flags so that we don't mix up the inode state that @sc tracks. + */ + sc->ip = rsumip; + sc->ilock_flags = XFS_ILOCK_EXCL | XFS_ILOCK_RTSUM; + xfs_ilock(sc->ip, sc->ilock_flags); + + /* Invoke the fork scrubber. */ + error = xchk_metadata_inode_forks(sc); + if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)) + goto out; + + /* XXX: implement this some day */ + xchk_set_incomplete(sc); +out: + /* Switch back to the rtbitmap inode and lock flags. 
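 * (Unlock the summary inode first, then restore the saved bitmap inode and
 * ilock flags so that teardown releases the lock we actually hold.)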
*/ + xfs_iunlock(sc->ip, sc->ilock_flags); + sc->ilock_flags = old_ilock_flags; + sc->ip = old_ip; + return error; +} + + +/* xref check that the extent is not free in the rtbitmap */ +void +xchk_xref_is_used_rt_space( + struct xfs_scrub *sc, + xfs_rtblock_t fsbno, + xfs_extlen_t len) +{ + xfs_rtblock_t startext; + xfs_rtblock_t endext; + xfs_rtblock_t extcount; + bool is_free; + int error; + + if (xchk_skip_xref(sc->sm)) + return; + + startext = fsbno; + endext = fsbno + len - 1; + do_div(startext, sc->mp->m_sb.sb_rextsize); + do_div(endext, sc->mp->m_sb.sb_rextsize); + extcount = endext - startext + 1; + xfs_ilock(sc->mp->m_rbmip, XFS_ILOCK_SHARED | XFS_ILOCK_RTBITMAP); + error = xfs_rtalloc_extent_is_free(sc->mp, sc->tp, startext, extcount, + &is_free); + if (!xchk_should_check_xref(sc, &error, NULL)) + goto out_unlock; + if (is_free) + xchk_ino_xref_set_corrupt(sc, sc->mp->m_rbmip->i_ino); +out_unlock: + xfs_iunlock(sc->mp->m_rbmip, XFS_ILOCK_SHARED | XFS_ILOCK_RTBITMAP); +} diff --git a/fs/xfs/scrub/scrub.c b/fs/xfs/scrub/scrub.c new file mode 100644 index 000000000..95132490f --- /dev/null +++ b/fs/xfs/scrub/scrub.c @@ -0,0 +1,566 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2017 Oracle. All Rights Reserved. + * Author: Darrick J. Wong + */ +#include "xfs.h" +#include "xfs_fs.h" +#include "xfs_shared.h" +#include "xfs_format.h" +#include "xfs_trans_resv.h" +#include "xfs_mount.h" +#include "xfs_log_format.h" +#include "xfs_trans.h" +#include "xfs_inode.h" +#include "xfs_quota.h" +#include "xfs_qm.h" +#include "xfs_errortag.h" +#include "xfs_error.h" +#include "xfs_scrub.h" +#include "scrub/scrub.h" +#include "scrub/common.h" +#include "scrub/trace.h" +#include "scrub/repair.h" +#include "scrub/health.h" + +/* + * Online Scrub and Repair + * + * Traditionally, XFS (the kernel driver) did not know how to check or + * repair on-disk data structures. That task was left to the xfs_check + * and xfs_repair tools, both of which require taking the filesystem + * offline for a thorough but time consuming examination. Online + * scrub & repair, on the other hand, enables us to check the metadata + * for obvious errors while carefully stepping around the filesystem's + * ongoing operations, locking rules, etc. + * + * Given that most XFS metadata consist of records stored in a btree, + * most of the checking functions iterate the btree blocks themselves + * looking for irregularities. When a record block is encountered, each + * record can be checked for obviously bad values. Record values can + * also be cross-referenced against other btrees to look for potential + * misunderstandings between pieces of metadata. + * + * It is expected that the checkers responsible for per-AG metadata + * structures will lock the AG headers (AGI, AGF, AGFL), iterate the + * metadata structure, and perform any relevant cross-referencing before + * unlocking the AG and returning the results to userspace. These + * scrubbers must not keep an AG locked for too long to avoid tying up + * the block and inode allocators. + * + * Block maps and b-trees rooted in an inode present a special challenge + * because they can involve extents from any AG. The general scrubber + * structure of lock -> check -> xref -> unlock still holds, but AG + * locking order rules /must/ be obeyed to avoid deadlocks. The + * ordering rule, of course, is that we must lock in increasing AG + * order. Helper functions are provided to track which AG headers we've + * already locked. 
If we detect an imminent locking order violation, we + * can signal a potential deadlock, in which case the scrubber can jump + * out to the top level, lock all the AGs in order, and retry the scrub. + * + * For file data (directories, extended attributes, symlinks) scrub, we + * can simply lock the inode and walk the data. For btree data + * (directories and attributes) we follow the same btree-scrubbing + * strategy outlined previously to check the records. + * + * We use a bit of trickery with transactions to avoid buffer deadlocks + * if there is a cycle in the metadata. The basic problem is that + * travelling down a btree involves locking the current buffer at each + * tree level. If a pointer should somehow point back to a buffer that + * we've already examined, we will deadlock due to the second buffer + * locking attempt. Note however that grabbing a buffer in transaction + * context links the locked buffer to the transaction. If we try to + * re-grab the buffer in the context of the same transaction, we avoid + * the second lock attempt and continue. Between the verifier and the + * scrubber, something will notice that something is amiss and report + * the corruption. Therefore, each scrubber will allocate an empty + * transaction, attach buffers to it, and cancel the transaction at the + * end of the scrub run. Cancelling a non-dirty transaction simply + * unlocks the buffers. + * + * There are four pieces of data that scrub can communicate to + * userspace. The first is the error code (errno), which can be used to + * communicate operational errors in performing the scrub. There are + * also three flags that can be set in the scrub context. If the data + * structure itself is corrupt, the CORRUPT flag will be set. If + * the metadata is correct but otherwise suboptimal, the PREEN flag + * will be set. + * + * We perform secondary validation of filesystem metadata by + * cross-referencing every record with all other available metadata. + * For example, for block mapping extents, we verify that there are no + * records in the free space and inode btrees corresponding to that + * space extent and that there is a corresponding entry in the reverse + * mapping btree. Inconsistent metadata is noted by setting the + * XCORRUPT flag; btree query function errors are noted by setting the + * XFAIL flag and deleting the cursor to prevent further attempts to + * cross-reference with a defective btree. + * + * If a piece of metadata proves corrupt or suboptimal, the userspace + * program can ask the kernel to apply some tender loving care (TLC) to + * the metadata object by setting the REPAIR flag and re-calling the + * scrub ioctl. "Corruption" is defined by metadata violating the + * on-disk specification; operations cannot continue if the violation is + * left untreated. It is possible for XFS to continue if an object is + * "suboptimal", however performance may be degraded. Repairs are + * usually performed by rebuilding the metadata entirely out of + * redundant metadata. Optimizing, on the other hand, can sometimes be + * done without rebuilding entire structures. + * + * Generally speaking, the repair code has the following code structure: + * Lock -> scrub -> repair -> commit -> re-lock -> re-scrub -> unlock. + * The first check helps us figure out if we need to rebuild or simply + * optimize the structure so that the rebuild knows what to do. The + * second check evaluates the completeness of the repair; that is what + * is reported to userspace. 
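 *
 * (Editorial aside: userspace drives all of this through the
 * XFS_IOC_SCRUB_METADATA ioctl. A caller might look roughly like the
 * following, error handling elided:
 *
 *	struct xfs_scrub_metadata sm = {
 *		.sm_type = XFS_SCRUB_TYPE_BNOBT,
 *		.sm_agno = 2,
 *	};
 *
 *	ioctl(fd, XFS_IOC_SCRUB_METADATA, &sm);
 *	if (sm.sm_flags & XFS_SCRUB_OFLAG_CORRUPT) {
 *		sm.sm_flags = XFS_SCRUB_IFLAG_REPAIR;
 *		ioctl(fd, XFS_IOC_SCRUB_METADATA, &sm);
 *	}
 *
 * The dispatcher at the bottom of this file implements the kernel half of
 * that conversation.)
 *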
+ * + * A quick note on symbol prefixes: + * - "xfs_" are general XFS symbols. + * - "xchk_" are symbols related to metadata checking. + * - "xrep_" are symbols related to metadata repair. + * - "xfs_scrub_" are symbols that tie online fsck to the rest of XFS. + */ + +/* + * Scrub probe -- userspace uses this to probe if we're willing to scrub + * or repair a given mountpoint. This will be used by xfs_scrub to + * probe the kernel's abilities to scrub (and repair) the metadata. We + * do this by validating the ioctl inputs from userspace, preparing the + * filesystem for a scrub (or a repair) operation, and immediately + * returning to userspace. Userspace can use the returned errno and + * structure state to decide (in broad terms) if scrub/repair are + * supported by the running kernel. + */ +static int +xchk_probe( + struct xfs_scrub *sc) +{ + int error = 0; + + if (xchk_should_terminate(sc, &error)) + return error; + + return 0; +} + +/* Scrub setup and teardown */ + +/* Free all the resources and finish the transactions. */ +STATIC int +xchk_teardown( + struct xfs_scrub *sc, + int error) +{ + struct xfs_inode *ip_in = XFS_I(file_inode(sc->file)); + + xchk_ag_free(sc, &sc->sa); + if (sc->tp) { + if (error == 0 && (sc->sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR)) + error = xfs_trans_commit(sc->tp); + else + xfs_trans_cancel(sc->tp); + sc->tp = NULL; + } + if (sc->ip) { + if (sc->ilock_flags) + xfs_iunlock(sc->ip, sc->ilock_flags); + if (sc->ip != ip_in && + !xfs_internal_inum(sc->mp, sc->ip->i_ino)) + xfs_irele(sc->ip); + sc->ip = NULL; + } + if (sc->sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR) + mnt_drop_write_file(sc->file); + if (sc->buf) { + kmem_free(sc->buf); + sc->buf = NULL; + } + return error; +} + +/* Scrubbing dispatch. */ + +static const struct xchk_meta_ops meta_scrub_ops[] = { + [XFS_SCRUB_TYPE_PROBE] = { /* ioctl presence test */ + .type = ST_NONE, + .setup = xchk_setup_fs, + .scrub = xchk_probe, + .repair = xrep_probe, + }, + [XFS_SCRUB_TYPE_SB] = { /* superblock */ + .type = ST_PERAG, + .setup = xchk_setup_fs, + .scrub = xchk_superblock, + .repair = xrep_superblock, + }, + [XFS_SCRUB_TYPE_AGF] = { /* agf */ + .type = ST_PERAG, + .setup = xchk_setup_fs, + .scrub = xchk_agf, + .repair = xrep_agf, + }, + [XFS_SCRUB_TYPE_AGFL]= { /* agfl */ + .type = ST_PERAG, + .setup = xchk_setup_fs, + .scrub = xchk_agfl, + .repair = xrep_agfl, + }, + [XFS_SCRUB_TYPE_AGI] = { /* agi */ + .type = ST_PERAG, + .setup = xchk_setup_fs, + .scrub = xchk_agi, + .repair = xrep_agi, + }, + [XFS_SCRUB_TYPE_BNOBT] = { /* bnobt */ + .type = ST_PERAG, + .setup = xchk_setup_ag_allocbt, + .scrub = xchk_bnobt, + .repair = xrep_notsupported, + }, + [XFS_SCRUB_TYPE_CNTBT] = { /* cntbt */ + .type = ST_PERAG, + .setup = xchk_setup_ag_allocbt, + .scrub = xchk_cntbt, + .repair = xrep_notsupported, + }, + [XFS_SCRUB_TYPE_INOBT] = { /* inobt */ + .type = ST_PERAG, + .setup = xchk_setup_ag_iallocbt, + .scrub = xchk_inobt, + .repair = xrep_notsupported, + }, + [XFS_SCRUB_TYPE_FINOBT] = { /* finobt */ + .type = ST_PERAG, + .setup = xchk_setup_ag_iallocbt, + .scrub = xchk_finobt, + .has = xfs_has_finobt, + .repair = xrep_notsupported, + }, + [XFS_SCRUB_TYPE_RMAPBT] = { /* rmapbt */ + .type = ST_PERAG, + .setup = xchk_setup_ag_rmapbt, + .scrub = xchk_rmapbt, + .has = xfs_has_rmapbt, + .repair = xrep_notsupported, + }, + [XFS_SCRUB_TYPE_REFCNTBT] = { /* refcountbt */ + .type = ST_PERAG, + .setup = xchk_setup_ag_refcountbt, + .scrub = xchk_refcountbt, + .has = xfs_has_reflink, + .repair = xrep_notsupported, + }, + 
[XFS_SCRUB_TYPE_INODE] = { /* inode record */ + .type = ST_INODE, + .setup = xchk_setup_inode, + .scrub = xchk_inode, + .repair = xrep_notsupported, + }, + [XFS_SCRUB_TYPE_BMBTD] = { /* inode data fork */ + .type = ST_INODE, + .setup = xchk_setup_inode_bmap, + .scrub = xchk_bmap_data, + .repair = xrep_notsupported, + }, + [XFS_SCRUB_TYPE_BMBTA] = { /* inode attr fork */ + .type = ST_INODE, + .setup = xchk_setup_inode_bmap, + .scrub = xchk_bmap_attr, + .repair = xrep_notsupported, + }, + [XFS_SCRUB_TYPE_BMBTC] = { /* inode CoW fork */ + .type = ST_INODE, + .setup = xchk_setup_inode_bmap, + .scrub = xchk_bmap_cow, + .repair = xrep_notsupported, + }, + [XFS_SCRUB_TYPE_DIR] = { /* directory */ + .type = ST_INODE, + .setup = xchk_setup_directory, + .scrub = xchk_directory, + .repair = xrep_notsupported, + }, + [XFS_SCRUB_TYPE_XATTR] = { /* extended attributes */ + .type = ST_INODE, + .setup = xchk_setup_xattr, + .scrub = xchk_xattr, + .repair = xrep_notsupported, + }, + [XFS_SCRUB_TYPE_SYMLINK] = { /* symbolic link */ + .type = ST_INODE, + .setup = xchk_setup_symlink, + .scrub = xchk_symlink, + .repair = xrep_notsupported, + }, + [XFS_SCRUB_TYPE_PARENT] = { /* parent pointers */ + .type = ST_INODE, + .setup = xchk_setup_parent, + .scrub = xchk_parent, + .repair = xrep_notsupported, + }, + [XFS_SCRUB_TYPE_RTBITMAP] = { /* realtime bitmap */ + .type = ST_FS, + .setup = xchk_setup_rt, + .scrub = xchk_rtbitmap, + .has = xfs_has_realtime, + .repair = xrep_notsupported, + }, + [XFS_SCRUB_TYPE_RTSUM] = { /* realtime summary */ + .type = ST_FS, + .setup = xchk_setup_rt, + .scrub = xchk_rtsummary, + .has = xfs_has_realtime, + .repair = xrep_notsupported, + }, + [XFS_SCRUB_TYPE_UQUOTA] = { /* user quota */ + .type = ST_FS, + .setup = xchk_setup_quota, + .scrub = xchk_quota, + .repair = xrep_notsupported, + }, + [XFS_SCRUB_TYPE_GQUOTA] = { /* group quota */ + .type = ST_FS, + .setup = xchk_setup_quota, + .scrub = xchk_quota, + .repair = xrep_notsupported, + }, + [XFS_SCRUB_TYPE_PQUOTA] = { /* project quota */ + .type = ST_FS, + .setup = xchk_setup_quota, + .scrub = xchk_quota, + .repair = xrep_notsupported, + }, + [XFS_SCRUB_TYPE_FSCOUNTERS] = { /* fs summary counters */ + .type = ST_FS, + .setup = xchk_setup_fscounters, + .scrub = xchk_fscounters, + .repair = xrep_notsupported, + }, +}; + +static int +xchk_validate_inputs( + struct xfs_mount *mp, + struct xfs_scrub_metadata *sm) +{ + int error; + const struct xchk_meta_ops *ops; + + error = -EINVAL; + /* Check our inputs. */ + sm->sm_flags &= ~XFS_SCRUB_FLAGS_OUT; + if (sm->sm_flags & ~XFS_SCRUB_FLAGS_IN) + goto out; + /* sm_reserved[] must be zero */ + if (memchr_inv(sm->sm_reserved, 0, sizeof(sm->sm_reserved))) + goto out; + + error = -ENOENT; + /* Do we know about this type of metadata? */ + if (sm->sm_type >= XFS_SCRUB_TYPE_NR) + goto out; + ops = &meta_scrub_ops[sm->sm_type]; + if (ops->setup == NULL || ops->scrub == NULL) + goto out; + /* Does this fs even support this type of metadata? */ + if (ops->has && !ops->has(mp)) + goto out; + + error = -EINVAL; + /* restricting fields must be appropriate for type */ + switch (ops->type) { + case ST_NONE: + case ST_FS: + if (sm->sm_ino || sm->sm_gen || sm->sm_agno) + goto out; + break; + case ST_PERAG: + if (sm->sm_ino || sm->sm_gen || + sm->sm_agno >= mp->m_sb.sb_agcount) + goto out; + break; + case ST_INODE: + if (sm->sm_agno || (sm->sm_gen && !sm->sm_ino)) + goto out; + break; + default: + goto out; + } + + /* + * We only want to repair read-write v5+ filesystems. 
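 * (v5 filesystems carry self-describing, CRC-protected metadata, which the
 * repair code leans on heavily.)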
Defer the check + * for ops->repair until after our scrub confirms that we need to + * perform repairs so that we avoid failing due to not supporting + * repairing an object that doesn't need repairs. + */ + if (sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR) { + error = -EOPNOTSUPP; + if (!xfs_has_crc(mp)) + goto out; + + error = -EROFS; + if (xfs_is_readonly(mp)) + goto out; + } + + error = 0; +out: + return error; +} + +#ifdef CONFIG_XFS_ONLINE_REPAIR +static inline void xchk_postmortem(struct xfs_scrub *sc) +{ + /* + * Userspace asked us to repair something, we repaired it, rescanned + * it, and the rescan says it's still broken. Scream about this in + * the system logs. + */ + if ((sc->sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR) && + (sc->sm->sm_flags & (XFS_SCRUB_OFLAG_CORRUPT | + XFS_SCRUB_OFLAG_XCORRUPT))) + xrep_failure(sc->mp); +} +#else +static inline void xchk_postmortem(struct xfs_scrub *sc) +{ + /* + * Userspace asked us to scrub something, it's broken, and we have no + * way of fixing it. Scream in the logs. + */ + if (sc->sm->sm_flags & (XFS_SCRUB_OFLAG_CORRUPT | + XFS_SCRUB_OFLAG_XCORRUPT)) + xfs_alert_ratelimited(sc->mp, + "Corruption detected during scrub."); +} +#endif /* CONFIG_XFS_ONLINE_REPAIR */ + +/* Dispatch metadata scrubbing. */ +int +xfs_scrub_metadata( + struct file *file, + struct xfs_scrub_metadata *sm) +{ + struct xfs_scrub *sc; + struct xfs_mount *mp = XFS_I(file_inode(file))->i_mount; + int error = 0; + + BUILD_BUG_ON(sizeof(meta_scrub_ops) != + (sizeof(struct xchk_meta_ops) * XFS_SCRUB_TYPE_NR)); + + trace_xchk_start(XFS_I(file_inode(file)), sm, error); + + /* Forbidden if we are shut down or mounted norecovery. */ + error = -ESHUTDOWN; + if (xfs_is_shutdown(mp)) + goto out; + error = -ENOTRECOVERABLE; + if (xfs_has_norecovery(mp)) + goto out; + + error = xchk_validate_inputs(mp, sm); + if (error) + goto out; + + xfs_warn_mount(mp, XFS_OPSTATE_WARNED_SCRUB, + "EXPERIMENTAL online scrub feature in use. Use at your own risk!"); + + sc = kmem_zalloc(sizeof(struct xfs_scrub), KM_NOFS | KM_MAYFAIL); + if (!sc) { + error = -ENOMEM; + goto out; + } + + sc->mp = mp; + sc->file = file; + sc->sm = sm; + sc->ops = &meta_scrub_ops[sm->sm_type]; + sc->sick_mask = xchk_health_mask_for_scrub_type(sm->sm_type); +retry_op: + /* + * When repairs are allowed, prevent freezing or readonly remount while + * scrub is running with a real transaction. + */ + if (sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR) { + error = mnt_want_write_file(sc->file); + if (error) + goto out_sc; + } + + /* Set up for the operation. */ + error = sc->ops->setup(sc); + if (error) + goto out_teardown; + + /* Scrub for errors. */ + error = sc->ops->scrub(sc); + if (!(sc->flags & XCHK_TRY_HARDER) && error == -EDEADLOCK) { + /* + * Scrubbers return -EDEADLOCK to mean 'try harder'. + * Tear down everything we hold, then set up again with + * preparation for worst-case scenarios. + */ + error = xchk_teardown(sc, 0); + if (error) + goto out_sc; + sc->flags |= XCHK_TRY_HARDER; + goto retry_op; + } else if (error || (sm->sm_flags & XFS_SCRUB_OFLAG_INCOMPLETE)) + goto out_teardown; + + xchk_update_health(sc); + + if ((sc->sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR) && + !(sc->flags & XREP_ALREADY_FIXED)) { + bool needs_fix; + + /* Let debug users force us into the repair routines. 
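 * (XFS_ERRTAG_FORCE_SCRUB_REPAIR fakes a corruption report so that the
 * repair path can be exercised on a healthy filesystem.)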
*/ + if (XFS_TEST_ERROR(false, mp, XFS_ERRTAG_FORCE_SCRUB_REPAIR)) + sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT; + + needs_fix = (sc->sm->sm_flags & (XFS_SCRUB_OFLAG_CORRUPT | + XFS_SCRUB_OFLAG_XCORRUPT | + XFS_SCRUB_OFLAG_PREEN)); + /* + * If userspace asked for a repair but it wasn't necessary, + * report that back to userspace. + */ + if (!needs_fix) { + sc->sm->sm_flags |= XFS_SCRUB_OFLAG_NO_REPAIR_NEEDED; + goto out_nofix; + } + + /* + * If it's broken, userspace wants us to fix it, and we haven't + * already tried to fix it, then attempt a repair. + */ + error = xrep_attempt(sc); + if (error == -EAGAIN) { + /* + * Either the repair function succeeded or it couldn't + * get all the resources it needs; either way, we go + * back to the beginning and call the scrub function. + */ + error = xchk_teardown(sc, 0); + if (error) { + xrep_failure(mp); + goto out_sc; + } + goto retry_op; + } + } + +out_nofix: + xchk_postmortem(sc); +out_teardown: + error = xchk_teardown(sc, error); +out_sc: + kmem_free(sc); +out: + trace_xchk_done(XFS_I(file_inode(file)), sm, error); + if (error == -EFSCORRUPTED || error == -EFSBADCRC) { + sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT; + error = 0; + } + return error; +} diff --git a/fs/xfs/scrub/scrub.h b/fs/xfs/scrub/scrub.h new file mode 100644 index 000000000..4cb32c27d --- /dev/null +++ b/fs/xfs/scrub/scrub.h @@ -0,0 +1,171 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2017 Oracle. All Rights Reserved. + * Author: Darrick J. Wong + */ +#ifndef __XFS_SCRUB_SCRUB_H__ +#define __XFS_SCRUB_SCRUB_H__ + +struct xfs_scrub; + +/* Type info and names for the scrub types. */ +enum xchk_type { + ST_NONE = 1, /* disabled */ + ST_PERAG, /* per-AG metadata */ + ST_FS, /* per-FS metadata */ + ST_INODE, /* per-inode metadata */ +}; + +struct xchk_meta_ops { + /* Acquire whatever resources are needed for the operation. */ + int (*setup)(struct xfs_scrub *sc); + + /* Examine metadata for errors. */ + int (*scrub)(struct xfs_scrub *); + + /* Repair or optimize the metadata. */ + int (*repair)(struct xfs_scrub *); + + /* Decide if we even have this piece of metadata. */ + bool (*has)(struct xfs_mount *); + + /* type describing required/allowed inputs */ + enum xchk_type type; +}; + +/* Buffer pointers and btree cursors for an entire AG. */ +struct xchk_ag { + struct xfs_perag *pag; + + /* AG btree roots */ + struct xfs_buf *agf_bp; + struct xfs_buf *agfl_bp; + struct xfs_buf *agi_bp; + + /* AG btrees */ + struct xfs_btree_cur *bno_cur; + struct xfs_btree_cur *cnt_cur; + struct xfs_btree_cur *ino_cur; + struct xfs_btree_cur *fino_cur; + struct xfs_btree_cur *rmap_cur; + struct xfs_btree_cur *refc_cur; +}; + +struct xfs_scrub { + /* General scrub state. */ + struct xfs_mount *mp; + struct xfs_scrub_metadata *sm; + const struct xchk_meta_ops *ops; + struct xfs_trans *tp; + + /* File that scrub was called with. */ + struct file *file; + + /* + * File that is undergoing the scrub operation. This can differ from + * the file that scrub was called with if we're checking file-based fs + * metadata (e.g. rt bitmaps) or if we're doing a scrub-by-handle for + * something that can't be opened directly (e.g. symlinks). + */ + struct xfs_inode *ip; + + void *buf; + uint ilock_flags; + + /* See the XCHK/XREP state flags below. */ + unsigned int flags; + + /* + * The XFS_SICK_* flags that correspond to the metadata being scrubbed + * or repaired. We will use this mask to update the in-core fs health + * status with whatever we find. 
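 * (Initialized from xchk_health_mask_for_scrub_type() and applied by
 * xchk_update_health() once the scrub completes.)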
+ */ + unsigned int sick_mask; + + /* State tracking for single-AG operations. */ + struct xchk_ag sa; +}; + +/* XCHK state flags grow up from zero, XREP state flags grown down from 2^31 */ +#define XCHK_TRY_HARDER (1 << 0) /* can't get resources, try again */ +#define XREP_ALREADY_FIXED (1 << 31) /* checking our repair work */ + +/* Metadata scrubbers */ +int xchk_tester(struct xfs_scrub *sc); +int xchk_superblock(struct xfs_scrub *sc); +int xchk_agf(struct xfs_scrub *sc); +int xchk_agfl(struct xfs_scrub *sc); +int xchk_agi(struct xfs_scrub *sc); +int xchk_bnobt(struct xfs_scrub *sc); +int xchk_cntbt(struct xfs_scrub *sc); +int xchk_inobt(struct xfs_scrub *sc); +int xchk_finobt(struct xfs_scrub *sc); +int xchk_rmapbt(struct xfs_scrub *sc); +int xchk_refcountbt(struct xfs_scrub *sc); +int xchk_inode(struct xfs_scrub *sc); +int xchk_bmap_data(struct xfs_scrub *sc); +int xchk_bmap_attr(struct xfs_scrub *sc); +int xchk_bmap_cow(struct xfs_scrub *sc); +int xchk_directory(struct xfs_scrub *sc); +int xchk_xattr(struct xfs_scrub *sc); +int xchk_symlink(struct xfs_scrub *sc); +int xchk_parent(struct xfs_scrub *sc); +#ifdef CONFIG_XFS_RT +int xchk_rtbitmap(struct xfs_scrub *sc); +int xchk_rtsummary(struct xfs_scrub *sc); +#else +static inline int +xchk_rtbitmap(struct xfs_scrub *sc) +{ + return -ENOENT; +} +static inline int +xchk_rtsummary(struct xfs_scrub *sc) +{ + return -ENOENT; +} +#endif +#ifdef CONFIG_XFS_QUOTA +int xchk_quota(struct xfs_scrub *sc); +#else +static inline int +xchk_quota(struct xfs_scrub *sc) +{ + return -ENOENT; +} +#endif +int xchk_fscounters(struct xfs_scrub *sc); + +/* cross-referencing helpers */ +void xchk_xref_is_used_space(struct xfs_scrub *sc, xfs_agblock_t agbno, + xfs_extlen_t len); +void xchk_xref_is_not_inode_chunk(struct xfs_scrub *sc, xfs_agblock_t agbno, + xfs_extlen_t len); +void xchk_xref_is_inode_chunk(struct xfs_scrub *sc, xfs_agblock_t agbno, + xfs_extlen_t len); +void xchk_xref_is_owned_by(struct xfs_scrub *sc, xfs_agblock_t agbno, + xfs_extlen_t len, const struct xfs_owner_info *oinfo); +void xchk_xref_is_not_owned_by(struct xfs_scrub *sc, xfs_agblock_t agbno, + xfs_extlen_t len, const struct xfs_owner_info *oinfo); +void xchk_xref_has_no_owner(struct xfs_scrub *sc, xfs_agblock_t agbno, + xfs_extlen_t len); +void xchk_xref_is_cow_staging(struct xfs_scrub *sc, xfs_agblock_t bno, + xfs_extlen_t len); +void xchk_xref_is_not_shared(struct xfs_scrub *sc, xfs_agblock_t bno, + xfs_extlen_t len); +#ifdef CONFIG_XFS_RT +void xchk_xref_is_used_rt_space(struct xfs_scrub *sc, xfs_rtblock_t rtbno, + xfs_extlen_t len); +#else +# define xchk_xref_is_used_rt_space(sc, rtbno, len) do { } while (0) +#endif + +struct xchk_fscounters { + uint64_t icount; + uint64_t ifree; + uint64_t fdblocks; + unsigned long long icount_min; + unsigned long long icount_max; +}; + +#endif /* __XFS_SCRUB_SCRUB_H__ */ diff --git a/fs/xfs/scrub/symlink.c b/fs/xfs/scrub/symlink.c new file mode 100644 index 000000000..75311f8da --- /dev/null +++ b/fs/xfs/scrub/symlink.c @@ -0,0 +1,69 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2017 Oracle. All Rights Reserved. + * Author: Darrick J. Wong + */ +#include "xfs.h" +#include "xfs_fs.h" +#include "xfs_shared.h" +#include "xfs_format.h" +#include "xfs_trans_resv.h" +#include "xfs_mount.h" +#include "xfs_log_format.h" +#include "xfs_inode.h" +#include "xfs_symlink.h" +#include "scrub/scrub.h" +#include "scrub/common.h" + +/* Set us up to scrub a symbolic link. 
*/ +int +xchk_setup_symlink( + struct xfs_scrub *sc) +{ + /* Allocate the buffer without the inode lock held. */ + sc->buf = kvzalloc(XFS_SYMLINK_MAXLEN + 1, GFP_KERNEL); + if (!sc->buf) + return -ENOMEM; + + return xchk_setup_inode_contents(sc, 0); +} + +/* Symbolic links. */ + +int +xchk_symlink( + struct xfs_scrub *sc) +{ + struct xfs_inode *ip = sc->ip; + struct xfs_ifork *ifp; + loff_t len; + int error = 0; + + if (!S_ISLNK(VFS_I(ip)->i_mode)) + return -ENOENT; + ifp = xfs_ifork_ptr(ip, XFS_DATA_FORK); + len = ip->i_disk_size; + + /* Plausible size? */ + if (len > XFS_SYMLINK_MAXLEN || len <= 0) { + xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0); + goto out; + } + + /* Inline symlink? */ + if (ifp->if_format == XFS_DINODE_FMT_LOCAL) { + if (len > xfs_inode_data_fork_size(ip) || + len > strnlen(ifp->if_u1.if_data, xfs_inode_data_fork_size(ip))) + xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0); + goto out; + } + + /* Remote symlink; must read the contents. */ + error = xfs_readlink_bmap_ilocked(sc->ip, sc->buf); + if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, 0, &error)) + goto out; + if (strnlen(sc->buf, XFS_SYMLINK_MAXLEN) < len) + xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0); +out: + return error; +} diff --git a/fs/xfs/scrub/trace.c b/fs/xfs/scrub/trace.c new file mode 100644 index 000000000..b5f94676c --- /dev/null +++ b/fs/xfs/scrub/trace.c @@ -0,0 +1,40 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2017 Oracle. All Rights Reserved. + * Author: Darrick J. Wong + */ +#include "xfs.h" +#include "xfs_fs.h" +#include "xfs_shared.h" +#include "xfs_format.h" +#include "xfs_log_format.h" +#include "xfs_trans_resv.h" +#include "xfs_mount.h" +#include "xfs_inode.h" +#include "xfs_btree.h" +#include "scrub/scrub.h" +#include "xfs_ag.h" + +/* Figure out which block the btree cursor was pointing to. */ +static inline xfs_fsblock_t +xchk_btree_cur_fsbno( + struct xfs_btree_cur *cur, + int level) +{ + if (level < cur->bc_nlevels && cur->bc_levels[level].bp) + return XFS_DADDR_TO_FSB(cur->bc_mp, + xfs_buf_daddr(cur->bc_levels[level].bp)); + + if (level == cur->bc_nlevels - 1 && + (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE)) + return XFS_INO_TO_FSB(cur->bc_mp, cur->bc_ino.ip->i_ino); + + return NULLFSBLOCK; +} + +/* + * We include this last to have the helpers above available for the trace + * event implementations. + */ +#define CREATE_TRACE_POINTS +#include "scrub/trace.h" diff --git a/fs/xfs/scrub/trace.h b/fs/xfs/scrub/trace.h new file mode 100644 index 000000000..93ece6df0 --- /dev/null +++ b/fs/xfs/scrub/trace.h @@ -0,0 +1,920 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2017 Oracle. All Rights Reserved. + * Author: Darrick J. Wong + * + * NOTE: none of these tracepoints shall be considered a stable kernel ABI + * as they can change at any time. See xfs_trace.h for documentation of + * specific units found in tracepoint output. + */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM xfs_scrub + +#if !defined(_TRACE_XFS_SCRUB_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_XFS_SCRUB_TRACE_H + +#include +#include "xfs_bit.h" + +/* + * ftrace's __print_symbolic requires that all enum values be wrapped in the + * TRACE_DEFINE_ENUM macro so that the enum value can be encoded in the ftrace + * ring buffer. Somehow this was only worth mentioning in the ftrace sample + * code. 
+ */ +TRACE_DEFINE_ENUM(XFS_BTNUM_BNOi); +TRACE_DEFINE_ENUM(XFS_BTNUM_CNTi); +TRACE_DEFINE_ENUM(XFS_BTNUM_BMAPi); +TRACE_DEFINE_ENUM(XFS_BTNUM_INOi); +TRACE_DEFINE_ENUM(XFS_BTNUM_FINOi); +TRACE_DEFINE_ENUM(XFS_BTNUM_RMAPi); +TRACE_DEFINE_ENUM(XFS_BTNUM_REFCi); + +TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_PROBE); +TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_SB); +TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_AGF); +TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_AGFL); +TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_AGI); +TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_BNOBT); +TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_CNTBT); +TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_INOBT); +TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_FINOBT); +TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_RMAPBT); +TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_REFCNTBT); +TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_INODE); +TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_BMBTD); +TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_BMBTA); +TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_BMBTC); +TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_DIR); +TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_XATTR); +TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_SYMLINK); +TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_PARENT); +TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_RTBITMAP); +TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_RTSUM); +TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_UQUOTA); +TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_GQUOTA); +TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_PQUOTA); +TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_FSCOUNTERS); + +#define XFS_SCRUB_TYPE_STRINGS \ + { XFS_SCRUB_TYPE_PROBE, "probe" }, \ + { XFS_SCRUB_TYPE_SB, "sb" }, \ + { XFS_SCRUB_TYPE_AGF, "agf" }, \ + { XFS_SCRUB_TYPE_AGFL, "agfl" }, \ + { XFS_SCRUB_TYPE_AGI, "agi" }, \ + { XFS_SCRUB_TYPE_BNOBT, "bnobt" }, \ + { XFS_SCRUB_TYPE_CNTBT, "cntbt" }, \ + { XFS_SCRUB_TYPE_INOBT, "inobt" }, \ + { XFS_SCRUB_TYPE_FINOBT, "finobt" }, \ + { XFS_SCRUB_TYPE_RMAPBT, "rmapbt" }, \ + { XFS_SCRUB_TYPE_REFCNTBT, "refcountbt" }, \ + { XFS_SCRUB_TYPE_INODE, "inode" }, \ + { XFS_SCRUB_TYPE_BMBTD, "bmapbtd" }, \ + { XFS_SCRUB_TYPE_BMBTA, "bmapbta" }, \ + { XFS_SCRUB_TYPE_BMBTC, "bmapbtc" }, \ + { XFS_SCRUB_TYPE_DIR, "directory" }, \ + { XFS_SCRUB_TYPE_XATTR, "xattr" }, \ + { XFS_SCRUB_TYPE_SYMLINK, "symlink" }, \ + { XFS_SCRUB_TYPE_PARENT, "parent" }, \ + { XFS_SCRUB_TYPE_RTBITMAP, "rtbitmap" }, \ + { XFS_SCRUB_TYPE_RTSUM, "rtsummary" }, \ + { XFS_SCRUB_TYPE_UQUOTA, "usrquota" }, \ + { XFS_SCRUB_TYPE_GQUOTA, "grpquota" }, \ + { XFS_SCRUB_TYPE_PQUOTA, "prjquota" }, \ + { XFS_SCRUB_TYPE_FSCOUNTERS, "fscounters" } + +#define XFS_SCRUB_FLAG_STRINGS \ + { XFS_SCRUB_IFLAG_REPAIR, "repair" }, \ + { XFS_SCRUB_OFLAG_CORRUPT, "corrupt" }, \ + { XFS_SCRUB_OFLAG_PREEN, "preen" }, \ + { XFS_SCRUB_OFLAG_XFAIL, "xfail" }, \ + { XFS_SCRUB_OFLAG_XCORRUPT, "xcorrupt" }, \ + { XFS_SCRUB_OFLAG_INCOMPLETE, "incomplete" }, \ + { XFS_SCRUB_OFLAG_WARNING, "warning" }, \ + { XFS_SCRUB_OFLAG_NO_REPAIR_NEEDED, "norepair" } + +DECLARE_EVENT_CLASS(xchk_class, + TP_PROTO(struct xfs_inode *ip, struct xfs_scrub_metadata *sm, + int error), + TP_ARGS(ip, sm, error), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(xfs_ino_t, ino) + __field(unsigned int, type) + __field(xfs_agnumber_t, agno) + __field(xfs_ino_t, inum) + __field(unsigned int, gen) + __field(unsigned int, flags) + __field(int, error) + ), + TP_fast_assign( + __entry->dev = ip->i_mount->m_super->s_dev; + __entry->ino = ip->i_ino; + __entry->type = sm->sm_type; + __entry->agno = sm->sm_agno; + __entry->inum = sm->sm_ino; + __entry->gen = sm->sm_gen; + __entry->flags = sm->sm_flags; + __entry->error = error; + ), + TP_printk("dev %d:%d ino 0x%llx type %s agno 0x%x inum 0x%llx gen 0x%x flags (%s) error %d", + MAJOR(__entry->dev), 
+
+TRACE_EVENT(xchk_op_error,
+	TP_PROTO(struct xfs_scrub *sc, xfs_agnumber_t agno,
+		 xfs_agblock_t bno, int error, void *ret_ip),
+	TP_ARGS(sc, agno, bno, error, ret_ip),
+	TP_STRUCT__entry(
+		__field(dev_t, dev)
+		__field(unsigned int, type)
+		__field(xfs_agnumber_t, agno)
+		__field(xfs_agblock_t, bno)
+		__field(int, error)
+		__field(void *, ret_ip)
+	),
+	TP_fast_assign(
+		__entry->dev = sc->mp->m_super->s_dev;
+		__entry->type = sc->sm->sm_type;
+		__entry->agno = agno;
+		__entry->bno = bno;
+		__entry->error = error;
+		__entry->ret_ip = ret_ip;
+	),
+	TP_printk("dev %d:%d type %s agno 0x%x agbno 0x%x error %d ret_ip %pS",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  __print_symbolic(__entry->type, XFS_SCRUB_TYPE_STRINGS),
+		  __entry->agno,
+		  __entry->bno,
+		  __entry->error,
+		  __entry->ret_ip)
+);
+
+TRACE_EVENT(xchk_file_op_error,
+	TP_PROTO(struct xfs_scrub *sc, int whichfork,
+		 xfs_fileoff_t offset, int error, void *ret_ip),
+	TP_ARGS(sc, whichfork, offset, error, ret_ip),
+	TP_STRUCT__entry(
+		__field(dev_t, dev)
+		__field(xfs_ino_t, ino)
+		__field(int, whichfork)
+		__field(unsigned int, type)
+		__field(xfs_fileoff_t, offset)
+		__field(int, error)
+		__field(void *, ret_ip)
+	),
+	TP_fast_assign(
+		__entry->dev = sc->ip->i_mount->m_super->s_dev;
+		__entry->ino = sc->ip->i_ino;
+		__entry->whichfork = whichfork;
+		__entry->type = sc->sm->sm_type;
+		__entry->offset = offset;
+		__entry->error = error;
+		__entry->ret_ip = ret_ip;
+	),
+	TP_printk("dev %d:%d ino 0x%llx fork %s type %s fileoff 0x%llx error %d ret_ip %pS",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  __entry->ino,
+		  __print_symbolic(__entry->whichfork, XFS_WHICHFORK_STRINGS),
+		  __print_symbolic(__entry->type, XFS_SCRUB_TYPE_STRINGS),
+		  __entry->offset,
+		  __entry->error,
+		  __entry->ret_ip)
+);
+
+DECLARE_EVENT_CLASS(xchk_block_error_class,
+	TP_PROTO(struct xfs_scrub *sc, xfs_daddr_t daddr, void *ret_ip),
+	TP_ARGS(sc, daddr, ret_ip),
+	TP_STRUCT__entry(
+		__field(dev_t, dev)
+		__field(unsigned int, type)
+		__field(xfs_agnumber_t, agno)
+		__field(xfs_agblock_t, agbno)
+		__field(void *, ret_ip)
+	),
+	TP_fast_assign(
+		__entry->dev = sc->mp->m_super->s_dev;
+		__entry->type = sc->sm->sm_type;
+		__entry->agno = xfs_daddr_to_agno(sc->mp, daddr);
+		__entry->agbno = xfs_daddr_to_agbno(sc->mp, daddr);
+		__entry->ret_ip = ret_ip;
+	),
+	TP_printk("dev %d:%d type %s agno 0x%x agbno 0x%x ret_ip %pS",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  __print_symbolic(__entry->type, XFS_SCRUB_TYPE_STRINGS),
+		  __entry->agno,
+		  __entry->agbno,
+		  __entry->ret_ip)
+)
+
+#define DEFINE_SCRUB_BLOCK_ERROR_EVENT(name) \
+DEFINE_EVENT(xchk_block_error_class, name, \
+	TP_PROTO(struct xfs_scrub *sc, xfs_daddr_t daddr, \
+		 void *ret_ip), \
+	TP_ARGS(sc, daddr, ret_ip))
+
+DEFINE_SCRUB_BLOCK_ERROR_EVENT(xchk_fs_error);
+DEFINE_SCRUB_BLOCK_ERROR_EVENT(xchk_block_error);
+DEFINE_SCRUB_BLOCK_ERROR_EVENT(xchk_block_preen);
+
+DECLARE_EVENT_CLASS(xchk_ino_error_class,
+	TP_PROTO(struct xfs_scrub *sc, xfs_ino_t ino, void *ret_ip),
+	TP_ARGS(sc, ino, ret_ip),
+	TP_STRUCT__entry(
+		__field(dev_t, dev)
+		__field(xfs_ino_t, ino)
+		__field(unsigned int, type)
+		__field(void *, ret_ip)
+	),
+	TP_fast_assign(
+		__entry->dev = sc->mp->m_super->s_dev;
+		__entry->ino = ino;
+		__entry->type = sc->sm->sm_type;
+		__entry->ret_ip = ret_ip;
+	),
+	TP_printk("dev %d:%d ino 0x%llx type %s ret_ip %pS",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  __entry->ino,
+		  __print_symbolic(__entry->type, XFS_SCRUB_TYPE_STRINGS),
+		  __entry->ret_ip)
+)
+
+#define DEFINE_SCRUB_INO_ERROR_EVENT(name) \
+DEFINE_EVENT(xchk_ino_error_class, name, \
+	TP_PROTO(struct xfs_scrub *sc, xfs_ino_t ino, \
+		 void *ret_ip), \
+	TP_ARGS(sc, ino, ret_ip))
+
+DEFINE_SCRUB_INO_ERROR_EVENT(xchk_ino_error);
+DEFINE_SCRUB_INO_ERROR_EVENT(xchk_ino_preen);
+DEFINE_SCRUB_INO_ERROR_EVENT(xchk_ino_warning);
+
+DECLARE_EVENT_CLASS(xchk_fblock_error_class,
+	TP_PROTO(struct xfs_scrub *sc, int whichfork,
+		 xfs_fileoff_t offset, void *ret_ip),
+	TP_ARGS(sc, whichfork, offset, ret_ip),
+	TP_STRUCT__entry(
+		__field(dev_t, dev)
+		__field(xfs_ino_t, ino)
+		__field(int, whichfork)
+		__field(unsigned int, type)
+		__field(xfs_fileoff_t, offset)
+		__field(void *, ret_ip)
+	),
+	TP_fast_assign(
+		__entry->dev = sc->ip->i_mount->m_super->s_dev;
+		__entry->ino = sc->ip->i_ino;
+		__entry->whichfork = whichfork;
+		__entry->type = sc->sm->sm_type;
+		__entry->offset = offset;
+		__entry->ret_ip = ret_ip;
+	),
+	TP_printk("dev %d:%d ino 0x%llx fork %s type %s fileoff 0x%llx ret_ip %pS",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  __entry->ino,
+		  __print_symbolic(__entry->whichfork, XFS_WHICHFORK_STRINGS),
+		  __print_symbolic(__entry->type, XFS_SCRUB_TYPE_STRINGS),
+		  __entry->offset,
+		  __entry->ret_ip)
+);
+
+#define DEFINE_SCRUB_FBLOCK_ERROR_EVENT(name) \
+DEFINE_EVENT(xchk_fblock_error_class, name, \
+	TP_PROTO(struct xfs_scrub *sc, int whichfork, \
+		 xfs_fileoff_t offset, void *ret_ip), \
+	TP_ARGS(sc, whichfork, offset, ret_ip))
+
+DEFINE_SCRUB_FBLOCK_ERROR_EVENT(xchk_fblock_error);
+DEFINE_SCRUB_FBLOCK_ERROR_EVENT(xchk_fblock_warning);
+
+TRACE_EVENT(xchk_incomplete,
+	TP_PROTO(struct xfs_scrub *sc, void *ret_ip),
+	TP_ARGS(sc, ret_ip),
+	TP_STRUCT__entry(
+		__field(dev_t, dev)
+		__field(unsigned int, type)
+		__field(void *, ret_ip)
+	),
+	TP_fast_assign(
+		__entry->dev = sc->mp->m_super->s_dev;
+		__entry->type = sc->sm->sm_type;
+		__entry->ret_ip = ret_ip;
+	),
+	TP_printk("dev %d:%d type %s ret_ip %pS",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  __print_symbolic(__entry->type, XFS_SCRUB_TYPE_STRINGS),
+		  __entry->ret_ip)
+);
+
+TRACE_EVENT(xchk_btree_op_error,
+	TP_PROTO(struct xfs_scrub *sc, struct xfs_btree_cur *cur,
+		 int level, int error, void *ret_ip),
+	TP_ARGS(sc, cur, level, error, ret_ip),
+	TP_STRUCT__entry(
+		__field(dev_t, dev)
+		__field(unsigned int, type)
+		__field(xfs_btnum_t, btnum)
+		__field(int, level)
+		__field(xfs_agnumber_t, agno)
+		__field(xfs_agblock_t, bno)
+		__field(int, ptr)
+		__field(int, error)
+		__field(void *, ret_ip)
+	),
+	TP_fast_assign(
+		xfs_fsblock_t fsbno = xchk_btree_cur_fsbno(cur, level);
+
+		__entry->dev = sc->mp->m_super->s_dev;
+		__entry->type = sc->sm->sm_type;
+		__entry->btnum = cur->bc_btnum;
+		__entry->level = level;
+		__entry->agno = XFS_FSB_TO_AGNO(cur->bc_mp, fsbno);
+		__entry->bno = XFS_FSB_TO_AGBNO(cur->bc_mp, fsbno);
+		__entry->ptr = cur->bc_levels[level].ptr;
+		__entry->error = error;
+		__entry->ret_ip = ret_ip;
+	),
+	TP_printk("dev %d:%d type %s btree %s level %d ptr %d agno 0x%x agbno 0x%x error %d ret_ip %pS",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  __print_symbolic(__entry->type, XFS_SCRUB_TYPE_STRINGS),
+		  __print_symbolic(__entry->btnum, XFS_BTNUM_STRINGS),
+		  __entry->level,
+		  __entry->ptr,
+		  __entry->agno,
+		  __entry->bno,
+		  __entry->error,
+		  __entry->ret_ip)
+);
+
+TRACE_EVENT(xchk_ifork_btree_op_error,
+	TP_PROTO(struct xfs_scrub *sc, struct xfs_btree_cur *cur,
+		 int level, int error, void *ret_ip),
+	TP_ARGS(sc, cur, level, error, ret_ip),
+	TP_STRUCT__entry(
+		__field(dev_t, dev)
+		__field(xfs_ino_t, ino)
+		__field(int, whichfork)
+		__field(unsigned int, type)
+		__field(xfs_btnum_t, btnum)
+		__field(int, level)
+		__field(int, ptr)
+		__field(xfs_agnumber_t, agno)
+		__field(xfs_agblock_t, bno)
+		__field(int, error)
+		__field(void *, ret_ip)
+	),
+	TP_fast_assign(
+		xfs_fsblock_t fsbno = xchk_btree_cur_fsbno(cur, level);
+		__entry->dev = sc->mp->m_super->s_dev;
+		__entry->ino = sc->ip->i_ino;
+		__entry->whichfork = cur->bc_ino.whichfork;
+		__entry->type = sc->sm->sm_type;
+		__entry->btnum = cur->bc_btnum;
+		__entry->level = level;
+		__entry->ptr = cur->bc_levels[level].ptr;
+		__entry->agno = XFS_FSB_TO_AGNO(cur->bc_mp, fsbno);
+		__entry->bno = XFS_FSB_TO_AGBNO(cur->bc_mp, fsbno);
+		__entry->error = error;
+		__entry->ret_ip = ret_ip;
+	),
+	TP_printk("dev %d:%d ino 0x%llx fork %s type %s btree %s level %d ptr %d agno 0x%x agbno 0x%x error %d ret_ip %pS",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  __entry->ino,
+		  __print_symbolic(__entry->whichfork, XFS_WHICHFORK_STRINGS),
+		  __print_symbolic(__entry->type, XFS_SCRUB_TYPE_STRINGS),
+		  __print_symbolic(__entry->btnum, XFS_BTNUM_STRINGS),
+		  __entry->level,
+		  __entry->ptr,
+		  __entry->agno,
+		  __entry->bno,
+		  __entry->error,
+		  __entry->ret_ip)
+);
+
+TRACE_EVENT(xchk_btree_error,
+	TP_PROTO(struct xfs_scrub *sc, struct xfs_btree_cur *cur,
+		 int level, void *ret_ip),
+	TP_ARGS(sc, cur, level, ret_ip),
+	TP_STRUCT__entry(
+		__field(dev_t, dev)
+		__field(unsigned int, type)
+		__field(xfs_btnum_t, btnum)
+		__field(int, level)
+		__field(xfs_agnumber_t, agno)
+		__field(xfs_agblock_t, bno)
+		__field(int, ptr)
+		__field(void *, ret_ip)
+	),
+	TP_fast_assign(
+		xfs_fsblock_t fsbno = xchk_btree_cur_fsbno(cur, level);
+		__entry->dev = sc->mp->m_super->s_dev;
+		__entry->type = sc->sm->sm_type;
+		__entry->btnum = cur->bc_btnum;
+		__entry->level = level;
+		__entry->agno = XFS_FSB_TO_AGNO(cur->bc_mp, fsbno);
+		__entry->bno = XFS_FSB_TO_AGBNO(cur->bc_mp, fsbno);
+		__entry->ptr = cur->bc_levels[level].ptr;
+		__entry->ret_ip = ret_ip;
+	),
+	TP_printk("dev %d:%d type %s btree %s level %d ptr %d agno 0x%x agbno 0x%x ret_ip %pS",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  __print_symbolic(__entry->type, XFS_SCRUB_TYPE_STRINGS),
+		  __print_symbolic(__entry->btnum, XFS_BTNUM_STRINGS),
+		  __entry->level,
+		  __entry->ptr,
+		  __entry->agno,
+		  __entry->bno,
+		  __entry->ret_ip)
+);
+
+TRACE_EVENT(xchk_ifork_btree_error,
+	TP_PROTO(struct xfs_scrub *sc, struct xfs_btree_cur *cur,
+		 int level, void *ret_ip),
+	TP_ARGS(sc, cur, level, ret_ip),
+	TP_STRUCT__entry(
+		__field(dev_t, dev)
+		__field(xfs_ino_t, ino)
+		__field(int, whichfork)
+		__field(unsigned int, type)
+		__field(xfs_btnum_t, btnum)
+		__field(int, level)
+		__field(xfs_agnumber_t, agno)
+		__field(xfs_agblock_t, bno)
+		__field(int, ptr)
+		__field(void *, ret_ip)
+	),
+	TP_fast_assign(
+		xfs_fsblock_t fsbno = xchk_btree_cur_fsbno(cur, level);
+		__entry->dev = sc->mp->m_super->s_dev;
+		__entry->ino = sc->ip->i_ino;
+		__entry->whichfork = cur->bc_ino.whichfork;
+		__entry->type = sc->sm->sm_type;
+		__entry->btnum = cur->bc_btnum;
+		__entry->level = level;
+		__entry->agno = XFS_FSB_TO_AGNO(cur->bc_mp, fsbno);
+		__entry->bno = XFS_FSB_TO_AGBNO(cur->bc_mp, fsbno);
+		__entry->ptr = cur->bc_levels[level].ptr;
+		__entry->ret_ip = ret_ip;
+	),
+	TP_printk("dev %d:%d ino 0x%llx fork %s type %s btree %s level %d ptr %d agno 0x%x agbno 0x%x ret_ip %pS",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  __entry->ino,
+		  __print_symbolic(__entry->whichfork, XFS_WHICHFORK_STRINGS),
+		  __print_symbolic(__entry->type, XFS_SCRUB_TYPE_STRINGS),
+		  __print_symbolic(__entry->btnum, XFS_BTNUM_STRINGS),
+		  __entry->level,
+		  __entry->ptr,
+		  __entry->agno,
+		  __entry->bno,
+		  __entry->ret_ip)
+);
+
+DECLARE_EVENT_CLASS(xchk_sbtree_class,
+	TP_PROTO(struct xfs_scrub *sc, struct xfs_btree_cur *cur,
+		 int level),
+	TP_ARGS(sc, cur, level),
+	TP_STRUCT__entry(
+		__field(dev_t, dev)
+		__field(int, type)
+		__field(xfs_btnum_t, btnum)
+		__field(xfs_agnumber_t, agno)
+		__field(xfs_agblock_t, bno)
+		__field(int, level)
+		__field(int, nlevels)
+		__field(int, ptr)
+	),
+	TP_fast_assign(
+		xfs_fsblock_t fsbno = xchk_btree_cur_fsbno(cur, level);
+
+		__entry->dev = sc->mp->m_super->s_dev;
+		__entry->type = sc->sm->sm_type;
+		__entry->btnum = cur->bc_btnum;
+		__entry->agno = XFS_FSB_TO_AGNO(cur->bc_mp, fsbno);
+		__entry->bno = XFS_FSB_TO_AGBNO(cur->bc_mp, fsbno);
+		__entry->level = level;
+		__entry->nlevels = cur->bc_nlevels;
+		__entry->ptr = cur->bc_levels[level].ptr;
+	),
+	TP_printk("dev %d:%d type %s btree %s agno 0x%x agbno 0x%x level %d nlevels %d ptr %d",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  __print_symbolic(__entry->type, XFS_SCRUB_TYPE_STRINGS),
+		  __print_symbolic(__entry->btnum, XFS_BTNUM_STRINGS),
+		  __entry->agno,
+		  __entry->bno,
+		  __entry->level,
+		  __entry->nlevels,
+		  __entry->ptr)
+)
+#define DEFINE_SCRUB_SBTREE_EVENT(name) \
+DEFINE_EVENT(xchk_sbtree_class, name, \
+	TP_PROTO(struct xfs_scrub *sc, struct xfs_btree_cur *cur, \
+		 int level), \
+	TP_ARGS(sc, cur, level))
+
+DEFINE_SCRUB_SBTREE_EVENT(xchk_btree_rec);
+DEFINE_SCRUB_SBTREE_EVENT(xchk_btree_key);
+
+TRACE_EVENT(xchk_xref_error,
+	TP_PROTO(struct xfs_scrub *sc, int error, void *ret_ip),
+	TP_ARGS(sc, error, ret_ip),
+	TP_STRUCT__entry(
+		__field(dev_t, dev)
+		__field(int, type)
+		__field(int, error)
+		__field(void *, ret_ip)
+	),
+	TP_fast_assign(
+		__entry->dev = sc->mp->m_super->s_dev;
+		__entry->type = sc->sm->sm_type;
+		__entry->error = error;
+		__entry->ret_ip = ret_ip;
+	),
+	TP_printk("dev %d:%d type %s xref error %d ret_ip %pS",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  __print_symbolic(__entry->type, XFS_SCRUB_TYPE_STRINGS),
+		  __entry->error,
+		  __entry->ret_ip)
+);
+
+TRACE_EVENT(xchk_iallocbt_check_cluster,
+	TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
+		 xfs_agino_t startino, xfs_daddr_t map_daddr,
+		 unsigned short map_len, unsigned int chunk_ino,
+		 unsigned int nr_inodes, uint16_t cluster_mask,
+		 uint16_t holemask, unsigned int cluster_ino),
+	TP_ARGS(mp, agno, startino, map_daddr, map_len, chunk_ino, nr_inodes,
+		cluster_mask, holemask, cluster_ino),
+	TP_STRUCT__entry(
+		__field(dev_t, dev)
+		__field(xfs_agnumber_t, agno)
+		__field(xfs_agino_t, startino)
+		__field(xfs_daddr_t, map_daddr)
+		__field(unsigned short, map_len)
+		__field(unsigned int, chunk_ino)
+		__field(unsigned int, nr_inodes)
+		__field(unsigned int, cluster_ino)
+		__field(uint16_t, cluster_mask)
+		__field(uint16_t, holemask)
+	),
+	TP_fast_assign(
+		__entry->dev = mp->m_super->s_dev;
+		__entry->agno = agno;
+		__entry->startino = startino;
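+		/* Disk address and length of the buffer backing this inode cluster. */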
+		__entry->map_daddr = map_daddr;
+		__entry->map_len = map_len;
+		__entry->chunk_ino = chunk_ino;
+		__entry->nr_inodes = nr_inodes;
+		__entry->cluster_mask = cluster_mask;
+		__entry->holemask = holemask;
+		__entry->cluster_ino = cluster_ino;
+	),
+	TP_printk("dev %d:%d agno 0x%x startino 0x%x daddr 0x%llx bbcount 0x%x chunkino 0x%x nr_inodes %u cluster_mask 0x%x holemask 0x%x cluster_ino 0x%x",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  __entry->agno,
+		  __entry->startino,
+		  __entry->map_daddr,
+		  __entry->map_len,
+		  __entry->chunk_ino,
+		  __entry->nr_inodes,
+		  __entry->cluster_mask,
+		  __entry->holemask,
+		  __entry->cluster_ino)
+)
+
+TRACE_EVENT(xchk_fscounters_calc,
+	TP_PROTO(struct xfs_mount *mp, uint64_t icount, uint64_t ifree,
+		 uint64_t fdblocks, uint64_t delalloc),
+	TP_ARGS(mp, icount, ifree, fdblocks, delalloc),
+	TP_STRUCT__entry(
+		__field(dev_t, dev)
+		__field(int64_t, icount_sb)
+		__field(uint64_t, icount_calculated)
+		__field(int64_t, ifree_sb)
+		__field(uint64_t, ifree_calculated)
+		__field(int64_t, fdblocks_sb)
+		__field(uint64_t, fdblocks_calculated)
+		__field(uint64_t, delalloc)
+	),
+	TP_fast_assign(
+		__entry->dev = mp->m_super->s_dev;
+		__entry->icount_sb = mp->m_sb.sb_icount;
+		__entry->icount_calculated = icount;
+		__entry->ifree_sb = mp->m_sb.sb_ifree;
+		__entry->ifree_calculated = ifree;
+		__entry->fdblocks_sb = mp->m_sb.sb_fdblocks;
+		__entry->fdblocks_calculated = fdblocks;
+		__entry->delalloc = delalloc;
+	),
+	TP_printk("dev %d:%d icount %lld:%llu ifree %lld:%llu fdblocks %lld:%llu delalloc %llu",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  __entry->icount_sb,
+		  __entry->icount_calculated,
+		  __entry->ifree_sb,
+		  __entry->ifree_calculated,
+		  __entry->fdblocks_sb,
+		  __entry->fdblocks_calculated,
+		  __entry->delalloc)
+)
+
+TRACE_EVENT(xchk_fscounters_within_range,
+	TP_PROTO(struct xfs_mount *mp, uint64_t expected, int64_t curr_value,
+		 int64_t old_value),
+	TP_ARGS(mp, expected, curr_value, old_value),
+	TP_STRUCT__entry(
+		__field(dev_t, dev)
+		__field(uint64_t, expected)
+		__field(int64_t, curr_value)
+		__field(int64_t, old_value)
+	),
+	TP_fast_assign(
+		__entry->dev = mp->m_super->s_dev;
+		__entry->expected = expected;
+		__entry->curr_value = curr_value;
+		__entry->old_value = old_value;
+	),
+	TP_printk("dev %d:%d expected %llu curr_value %lld old_value %lld",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  __entry->expected,
+		  __entry->curr_value,
+		  __entry->old_value)
+)
+
+/* repair tracepoints */
+#if IS_ENABLED(CONFIG_XFS_ONLINE_REPAIR)
+
+DECLARE_EVENT_CLASS(xrep_extent_class,
+	TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
+		 xfs_agblock_t agbno, xfs_extlen_t len),
+	TP_ARGS(mp, agno, agbno, len),
+	TP_STRUCT__entry(
+		__field(dev_t, dev)
+		__field(xfs_agnumber_t, agno)
+		__field(xfs_agblock_t, agbno)
+		__field(xfs_extlen_t, len)
+	),
+	TP_fast_assign(
+		__entry->dev = mp->m_super->s_dev;
+		__entry->agno = agno;
+		__entry->agbno = agbno;
+		__entry->len = len;
+	),
+	TP_printk("dev %d:%d agno 0x%x agbno 0x%x fsbcount 0x%x",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  __entry->agno,
+		  __entry->agbno,
+		  __entry->len)
+);
+#define DEFINE_REPAIR_EXTENT_EVENT(name) \
+DEFINE_EVENT(xrep_extent_class, name, \
+	TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, \
+		 xfs_agblock_t agbno, xfs_extlen_t len), \
+	TP_ARGS(mp, agno, agbno, len))
+DEFINE_REPAIR_EXTENT_EVENT(xrep_dispose_btree_extent);
+DEFINE_REPAIR_EXTENT_EVENT(xrep_agfl_insert);
+
+DECLARE_EVENT_CLASS(xrep_rmap_class,
+	TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
+		 xfs_agblock_t agbno, xfs_extlen_t len,
+		 uint64_t owner, uint64_t offset, unsigned int flags),
+	TP_ARGS(mp, agno, agbno, len, owner, offset, flags),
+	TP_STRUCT__entry(
+		__field(dev_t, dev)
+		__field(xfs_agnumber_t, agno)
+		__field(xfs_agblock_t, agbno)
+		__field(xfs_extlen_t, len)
+		__field(uint64_t, owner)
+		__field(uint64_t, offset)
+		__field(unsigned int, flags)
+	),
+	TP_fast_assign(
+		__entry->dev = mp->m_super->s_dev;
+		__entry->agno = agno;
+		__entry->agbno = agbno;
+		__entry->len = len;
+		__entry->owner = owner;
+		__entry->offset = offset;
+		__entry->flags = flags;
+	),
+	TP_printk("dev %d:%d agno 0x%x agbno 0x%x fsbcount 0x%x owner 0x%llx fileoff 0x%llx flags 0x%x",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  __entry->agno,
+		  __entry->agbno,
+		  __entry->len,
+		  __entry->owner,
+		  __entry->offset,
+		  __entry->flags)
+);
+#define DEFINE_REPAIR_RMAP_EVENT(name) \
+DEFINE_EVENT(xrep_rmap_class, name, \
+	TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, \
+		 xfs_agblock_t agbno, xfs_extlen_t len, \
+		 uint64_t owner, uint64_t offset, unsigned int flags), \
+	TP_ARGS(mp, agno, agbno, len, owner, offset, flags))
+DEFINE_REPAIR_RMAP_EVENT(xrep_alloc_extent_fn);
+DEFINE_REPAIR_RMAP_EVENT(xrep_ialloc_extent_fn);
+DEFINE_REPAIR_RMAP_EVENT(xrep_rmap_extent_fn);
+DEFINE_REPAIR_RMAP_EVENT(xrep_bmap_extent_fn);
+
+TRACE_EVENT(xrep_refcount_extent_fn,
+	TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
+		 struct xfs_refcount_irec *irec),
+	TP_ARGS(mp, agno, irec),
+	TP_STRUCT__entry(
+		__field(dev_t, dev)
+		__field(xfs_agnumber_t, agno)
+		__field(xfs_agblock_t, startblock)
+		__field(xfs_extlen_t, blockcount)
+		__field(xfs_nlink_t, refcount)
+	),
+	TP_fast_assign(
+		__entry->dev = mp->m_super->s_dev;
+		__entry->agno = agno;
+		__entry->startblock = irec->rc_startblock;
+		__entry->blockcount = irec->rc_blockcount;
+		__entry->refcount = irec->rc_refcount;
+	),
+	TP_printk("dev %d:%d agno 0x%x agbno 0x%x fsbcount 0x%x refcount %u",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  __entry->agno,
+		  __entry->startblock,
+		  __entry->blockcount,
+		  __entry->refcount)
+)
+
+TRACE_EVENT(xrep_init_btblock,
+	TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agblock_t agbno,
+		 xfs_btnum_t btnum),
+	TP_ARGS(mp, agno, agbno, btnum),
+	TP_STRUCT__entry(
+		__field(dev_t, dev)
+		__field(xfs_agnumber_t, agno)
+		__field(xfs_agblock_t, agbno)
+		__field(uint32_t, btnum)
+	),
+	TP_fast_assign(
+		__entry->dev = mp->m_super->s_dev;
+		__entry->agno = agno;
+		__entry->agbno = agbno;
+		__entry->btnum = btnum;
+	),
+	TP_printk("dev %d:%d agno 0x%x agbno 0x%x btree %s",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  __entry->agno,
+		  __entry->agbno,
+		  __print_symbolic(__entry->btnum, XFS_BTNUM_STRINGS))
+)
+TRACE_EVENT(xrep_findroot_block,
+	TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agblock_t agbno,
+		 uint32_t magic, uint16_t level),
+	TP_ARGS(mp, agno, agbno, magic, level),
+	TP_STRUCT__entry(
+		__field(dev_t, dev)
+		__field(xfs_agnumber_t, agno)
+		__field(xfs_agblock_t, agbno)
+		__field(uint32_t, magic)
+		__field(uint16_t, level)
+	),
+	TP_fast_assign(
+		__entry->dev = mp->m_super->s_dev;
+		__entry->agno = agno;
+		__entry->agbno = agbno;
+		__entry->magic = magic;
+		__entry->level = level;
+	),
+	TP_printk("dev %d:%d agno 0x%x agbno 0x%x magic 0x%x level %u",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  __entry->agno,
+		  __entry->agbno,
+		  __entry->magic,
+		  __entry->level)
+)
+TRACE_EVENT(xrep_calc_ag_resblks,
+	TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
+		 xfs_agino_t icount, xfs_agblock_t aglen, xfs_agblock_t freelen,
+		 xfs_agblock_t usedlen),
+	TP_ARGS(mp, agno, icount, aglen, freelen, usedlen),
+	TP_STRUCT__entry(
+		__field(dev_t, dev)
+		__field(xfs_agnumber_t, agno)
+		__field(xfs_agino_t, icount)
+		__field(xfs_agblock_t, aglen)
+		__field(xfs_agblock_t, freelen)
+		__field(xfs_agblock_t, usedlen)
+	),
+	TP_fast_assign(
+		__entry->dev = mp->m_super->s_dev;
+		__entry->agno = agno;
+		__entry->icount = icount;
+		__entry->aglen = aglen;
+		__entry->freelen = freelen;
+		__entry->usedlen = usedlen;
+	),
+	TP_printk("dev %d:%d agno 0x%x icount %u aglen %u freelen %u usedlen %u",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  __entry->agno,
+		  __entry->icount,
+		  __entry->aglen,
+		  __entry->freelen,
+		  __entry->usedlen)
+)
+TRACE_EVENT(xrep_calc_ag_resblks_btsize,
+	TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
+		 xfs_agblock_t bnobt_sz, xfs_agblock_t inobt_sz,
+		 xfs_agblock_t rmapbt_sz, xfs_agblock_t refcbt_sz),
+	TP_ARGS(mp, agno, bnobt_sz, inobt_sz, rmapbt_sz, refcbt_sz),
+	TP_STRUCT__entry(
+		__field(dev_t, dev)
+		__field(xfs_agnumber_t, agno)
+		__field(xfs_agblock_t, bnobt_sz)
+		__field(xfs_agblock_t, inobt_sz)
+		__field(xfs_agblock_t, rmapbt_sz)
+		__field(xfs_agblock_t, refcbt_sz)
+	),
+	TP_fast_assign(
+		__entry->dev = mp->m_super->s_dev;
+		__entry->agno = agno;
+		__entry->bnobt_sz = bnobt_sz;
+		__entry->inobt_sz = inobt_sz;
+		__entry->rmapbt_sz = rmapbt_sz;
+		__entry->refcbt_sz = refcbt_sz;
+	),
+	TP_printk("dev %d:%d agno 0x%x bnobt %u inobt %u rmapbt %u refcountbt %u",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  __entry->agno,
+		  __entry->bnobt_sz,
+		  __entry->inobt_sz,
+		  __entry->rmapbt_sz,
+		  __entry->refcbt_sz)
+)
+TRACE_EVENT(xrep_reset_counters,
+	TP_PROTO(struct xfs_mount *mp),
+	TP_ARGS(mp),
+	TP_STRUCT__entry(
+		__field(dev_t, dev)
+	),
+	TP_fast_assign(
+		__entry->dev = mp->m_super->s_dev;
+	),
+	TP_printk("dev %d:%d",
+		  MAJOR(__entry->dev), MINOR(__entry->dev))
+)
+
+TRACE_EVENT(xrep_ialloc_insert,
+	TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
+		 xfs_agino_t startino, uint16_t holemask, uint8_t count,
+		 uint8_t freecount, uint64_t freemask),
+	TP_ARGS(mp, agno, startino, holemask, count, freecount, freemask),
+	TP_STRUCT__entry(
+		__field(dev_t, dev)
+		__field(xfs_agnumber_t, agno)
+		__field(xfs_agino_t, startino)
+		__field(uint16_t, holemask)
+		__field(uint8_t, count)
+		__field(uint8_t, freecount)
+		__field(uint64_t, freemask)
+	),
+	TP_fast_assign(
+		__entry->dev = mp->m_super->s_dev;
+		__entry->agno = agno;
+		__entry->startino = startino;
+		__entry->holemask = holemask;
+		__entry->count = count;
+		__entry->freecount = freecount;
+		__entry->freemask = freemask;
+	),
+	TP_printk("dev %d:%d agno 0x%x startino 0x%x holemask 0x%x count %u freecount %u freemask 0x%llx",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  __entry->agno,
+		  __entry->startino,
+		  __entry->holemask,
+		  __entry->count,
+		  __entry->freecount,
+		  __entry->freemask)
+)
+
+#endif /* IS_ENABLED(CONFIG_XFS_ONLINE_REPAIR) */
+
+#endif /* _TRACE_XFS_SCRUB_TRACE_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE scrub/trace
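+/*
+ * scrub/trace.c defines CREATE_TRACE_POINTS immediately before including
+ * this header, so the include below makes define_trace.h re-read this file
+ * (via TRACE_HEADER_MULTI_READ) and emit the tracepoint implementations.
+ */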
+#include <trace/define_trace.h>
diff --git a/fs/xfs/scrub/xfs_scrub.h b/fs/xfs/scrub/xfs_scrub.h
new file mode 100644
index 000000000..2ceae614a
--- /dev/null
+++ b/fs/xfs/scrub/xfs_scrub.h
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2017 Oracle.  All Rights Reserved.
+ * Author: Darrick J. Wong
+ */
+#ifndef __XFS_SCRUB_H__
+#define __XFS_SCRUB_H__
+
+#ifndef CONFIG_XFS_ONLINE_SCRUB
+# define xfs_scrub_metadata(file, sm) (-ENOTTY)
+#else
+int xfs_scrub_metadata(struct file *file, struct xfs_scrub_metadata *sm);
+#endif /* CONFIG_XFS_ONLINE_SCRUB */
+
+#endif /* __XFS_SCRUB_H__ */
-- 
cgit v1.2.3