Diffstat (limited to 'fs/xfs')
-rw-r--r--  fs/xfs/libxfs/xfs_ag.c           |   8
-rw-r--r--  fs/xfs/libxfs/xfs_attr.c         |  16
-rw-r--r--  fs/xfs/libxfs/xfs_ialloc.c       |  55
-rw-r--r--  fs/xfs/libxfs/xfs_ialloc.h       |   3
-rw-r--r--  fs/xfs/libxfs/xfs_inode_buf.c    |  28
-rw-r--r--  fs/xfs/libxfs/xfs_log_format.h   |  11
-rw-r--r--  fs/xfs/libxfs/xfs_trans_inode.c  |  10
-rw-r--r--  fs/xfs/scrub/inode.c             |  18
-rw-r--r--  fs/xfs/xfs_buf_item_recover.c    |  15
-rw-r--r--  fs/xfs/xfs_inode.c               |  13
-rw-r--r--  fs/xfs/xfs_inode_item_recover.c  |  39
-rw-r--r--  fs/xfs/xfs_ioctl.c               |  27
-rw-r--r--  fs/xfs/xfs_log.c                 | 251
-rw-r--r--  fs/xfs/xfs_log_cil.c             |  13
-rw-r--r--  fs/xfs/xfs_log_priv.h            |  16
-rw-r--r--  fs/xfs/xfs_rtalloc.c             |  49
-rw-r--r--  fs/xfs/xfs_trace.h               |   5
17 files changed, 426 insertions(+), 151 deletions(-)
diff --git a/fs/xfs/libxfs/xfs_ag.c b/fs/xfs/libxfs/xfs_ag.c
index 778ec52cce70..ee9ec0c50bec 100644
--- a/fs/xfs/libxfs/xfs_ag.c
+++ b/fs/xfs/libxfs/xfs_ag.c
@@ -804,6 +804,14 @@ xfs_ag_shrink_space(
args.fsbno = XFS_AGB_TO_FSB(mp, agno, aglen - delta);
/*
+ * Make sure that the last inode cluster cannot overlap with the new
+ * end of the AG, even if it's sparse.
+ */
+ error = xfs_ialloc_check_shrink(*tpp, agno, agibp, aglen - delta);
+ if (error)
+ return error;
+
+ /*
* Disable perag reservations so it doesn't cause the allocation request
* to fail. We'll reestablish reservation before we return.
*/
diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
index d9d7d5137b73..191d51725988 100644
--- a/fs/xfs/libxfs/xfs_attr.c
+++ b/fs/xfs/libxfs/xfs_attr.c
@@ -483,7 +483,7 @@ xfs_attr_set_iter(
if (error)
return error;
- /* fallthrough */
+ fallthrough;
case XFS_DAS_RM_LBLK:
/* Set state in case xfs_attr_rmtval_remove returns -EAGAIN */
dac->dela_state = XFS_DAS_RM_LBLK;
@@ -496,7 +496,7 @@ xfs_attr_set_iter(
return -EAGAIN;
}
- /* fallthrough */
+ fallthrough;
case XFS_DAS_RD_LEAF:
/*
* This is the last step for leaf format. Read the block with
@@ -528,7 +528,7 @@ xfs_attr_set_iter(
return error;
}
- /* fallthrough */
+ fallthrough;
case XFS_DAS_ALLOC_NODE:
/*
* If there was an out-of-line value, allocate the blocks we
@@ -590,7 +590,7 @@ xfs_attr_set_iter(
if (error)
return error;
- /* fallthrough */
+ fallthrough;
case XFS_DAS_RM_NBLK:
/* Set state in case xfs_attr_rmtval_remove returns -EAGAIN */
dac->dela_state = XFS_DAS_RM_NBLK;
@@ -603,7 +603,7 @@ xfs_attr_set_iter(
return -EAGAIN;
}
- /* fallthrough */
+ fallthrough;
case XFS_DAS_CLR_FLAG:
/*
* The last state for node format. Look up the old attr and
@@ -1406,7 +1406,7 @@ xfs_attr_remove_iter(
state = dac->da_state;
}
- /* fallthrough */
+ fallthrough;
case XFS_DAS_RMTBLK:
dac->dela_state = XFS_DAS_RMTBLK;
@@ -1441,7 +1441,7 @@ xfs_attr_remove_iter(
return -EAGAIN;
}
- /* fallthrough */
+ fallthrough;
case XFS_DAS_RM_NAME:
/*
* If we came here fresh from a transaction roll, reattach all
@@ -1469,7 +1469,7 @@ xfs_attr_remove_iter(
return -EAGAIN;
}
- /* fallthrough */
+ fallthrough;
case XFS_DAS_RM_SHRINK:
/*
* If the result is small enough, push it all into the inode.
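The hunks above replace fallthrough comments with the fallthrough; pseudo-keyword from include/linux/compiler_attributes.h, which expands to __attribute__((__fallthrough__)) where the compiler supports it, so -Wimplicit-fallthrough can verify that each fallthrough is intentional. A minimal sketch of the pattern, with illustrative states and helpers that are not taken from xfs_attr.c:

#include <linux/compiler_attributes.h>
#include <linux/errno.h>

static int demo_step(int state)
{
	switch (state) {
	case 0:
		/* setup work specific to state 0 would go here */
		fallthrough;	/* a statement the compiler checks, unlike a comment */
	case 1:
		/* work shared by states 0 and 1 */
		return 0;
	default:
		return -EINVAL;
	}
}
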
diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c
index 57d9cb632983..aaf8805a82df 100644
--- a/fs/xfs/libxfs/xfs_ialloc.c
+++ b/fs/xfs/libxfs/xfs_ialloc.c
@@ -2928,3 +2928,58 @@ xfs_ialloc_calc_rootino(
return XFS_AGINO_TO_INO(mp, 0, XFS_AGB_TO_AGINO(mp, first_bno));
}
+
+/*
+ * Ensure there are no sparse inode clusters that cross the new EOAG.
+ *
+ * This is a no-op for non-sparse-inode filesystems, since clusters are always
+ * fully allocated and checking the bnobt suffices. However, a sparse-inode
+ * filesystem could have a record where the upper inodes are free blocks. If
+ * those blocks were removed from the filesystem, the inode record would
+ * extend beyond EOAG, which will be flagged as corruption.
+ */
+int
+xfs_ialloc_check_shrink(
+ struct xfs_trans *tp,
+ xfs_agnumber_t agno,
+ struct xfs_buf *agibp,
+ xfs_agblock_t new_length)
+{
+ struct xfs_inobt_rec_incore rec;
+ struct xfs_btree_cur *cur;
+ struct xfs_mount *mp = tp->t_mountp;
+ struct xfs_perag *pag;
+ xfs_agino_t agino = XFS_AGB_TO_AGINO(mp, new_length);
+ int has;
+ int error;
+
+ if (!xfs_sb_version_hassparseinodes(&mp->m_sb))
+ return 0;
+
+ pag = xfs_perag_get(mp, agno);
+ cur = xfs_inobt_init_cursor(mp, tp, agibp, pag, XFS_BTNUM_INO);
+
+ /* Look up the inobt record that would correspond to the new EOFS. */
+ error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &has);
+ if (error || !has)
+ goto out;
+
+ error = xfs_inobt_get_rec(cur, &rec, &has);
+ if (error)
+ goto out;
+
+ if (!has) {
+ error = -EFSCORRUPTED;
+ goto out;
+ }
+
+ /* If the record covers inodes that would be beyond EOFS, bail out. */
+ if (rec.ir_startino + XFS_INODES_PER_CHUNK > agino) {
+ error = -ENOSPC;
+ goto out;
+ }
+out:
+ xfs_btree_del_cursor(cur, error);
+ xfs_perag_put(pag);
+ return error;
+}
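To make the overlap test concrete, a worked example with assumed geometry (the numbers are illustrative, not from this patch):

/*
 * Assume 4096-byte blocks and 512-byte inodes: 8 inodes per block, so one
 * 64-inode chunk (XFS_INODES_PER_CHUNK) spans 8 AG blocks. If the shrink
 * leaves new_length = 12 blocks, then agino = XFS_AGB_TO_AGINO(mp, 12) = 96.
 * An LE lookup that finds a record with ir_startino = 64 covers inodes
 * 64..127, and 64 + 64 = 128 > 96: the chunk crosses the new EOAG, so the
 * shrink fails with -ENOSPC even if the inodes past agino are sparse holes.
 */
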
diff --git a/fs/xfs/libxfs/xfs_ialloc.h b/fs/xfs/libxfs/xfs_ialloc.h
index 9df7c80408ff..9a2112b4ad5e 100644
--- a/fs/xfs/libxfs/xfs_ialloc.h
+++ b/fs/xfs/libxfs/xfs_ialloc.h
@@ -122,4 +122,7 @@ int xfs_ialloc_cluster_alignment(struct xfs_mount *mp);
void xfs_ialloc_setup_geometry(struct xfs_mount *mp);
xfs_ino_t xfs_ialloc_calc_rootino(struct xfs_mount *mp, int sunit);
+int xfs_ialloc_check_shrink(struct xfs_trans *tp, xfs_agnumber_t agno,
+ struct xfs_buf *agibp, xfs_agblock_t new_length);
+
#endif /* __XFS_IALLOC_H__ */
diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c
index 04ce361688f7..84ea2e0af9f0 100644
--- a/fs/xfs/libxfs/xfs_inode_buf.c
+++ b/fs/xfs/libxfs/xfs_inode_buf.c
@@ -592,23 +592,27 @@ xfs_inode_validate_extsize(
/*
* This comment describes a historic gap in this verifier function.
*
- * On older kernels, the extent size hint verifier doesn't check that
- * the extent size hint is an integer multiple of the realtime extent
- * size on a directory with both RTINHERIT and EXTSZINHERIT flags set.
- * The verifier has always enforced the alignment rule for regular
- * files with the REALTIME flag set.
+ * For a directory with both RTINHERIT and EXTSZINHERIT flags set, this
+ * function has never checked that the extent size hint is an integer
+ * multiple of the realtime extent size. Since we allow users to set
+ * this combination on non-rt filesystems /and/ to change the rt
+ * extent size when adding a rt device to a filesystem, the net effect
+ * is that users can configure a filesystem anticipating one rt
+ * geometry and change their minds later. Directories do not use the
+ * extent size hint, so this is harmless for them.
*
* If a directory with a misaligned extent size hint is allowed to
* propagate that hint into a new regular realtime file, the result
* is that the inode cluster buffer verifier will trigger a corruption
- * shutdown the next time it is run.
+ * shutdown the next time it is run, because the verifier has always
+ * enforced the alignment rule for regular files.
*
- * Unfortunately, there could be filesystems with these misconfigured
- * directories in the wild, so we cannot add a check to this verifier
- * at this time because that will result a new source of directory
- * corruption errors when reading an existing filesystem. Instead, we
- * permit the misconfiguration to pass through the verifiers so that
- * callers of this function can correct and mitigate externally.
+ * Because we allow administrators to set a new rt extent size when
+ * adding a rt section, we cannot add a check to this verifier because
+ * that will result in a new source of directory corruption errors when
+ * reading an existing filesystem. Instead, we rely on callers to
+ * decide when alignment checks are appropriate, and fix things up as
+ * needed.
*/
if (rt_flag)
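Because the verifier deliberately lets this misconfiguration through, each caller applies the alignment test itself. A minimal sketch of that test as a hypothetical helper (the real callers open-code it; see the xfs_trans_inode.c, scrub/inode.c, and xfs_ioctl.c hunks elsewhere in this diff):

/* Hypothetical helper: is a directory's inherited extent size hint misaligned? */
static inline bool
xfs_extszinherit_misaligned(struct xfs_mount *mp, uint16_t diflags,
			    uint32_t extsize)
{
	return (diflags & XFS_DIFLAG_RTINHERIT) &&
	       (diflags & XFS_DIFLAG_EXTSZINHERIT) &&
	       (extsize % mp->m_sb.sb_rextsize) > 0;
}
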
diff --git a/fs/xfs/libxfs/xfs_log_format.h b/fs/xfs/libxfs/xfs_log_format.h
index d548ea4b6aab..2c5bcbc19264 100644
--- a/fs/xfs/libxfs/xfs_log_format.h
+++ b/fs/xfs/libxfs/xfs_log_format.h
@@ -411,7 +411,16 @@ struct xfs_log_dinode {
/* start of the extended dinode, writable fields */
uint32_t di_crc; /* CRC of the inode */
uint64_t di_changecount; /* number of attribute changes */
- xfs_lsn_t di_lsn; /* flush sequence */
+
+ /*
+ * The LSN we write to this field during formatting is not a reflection
+ * of the current on-disk LSN. It should never be used for recovery
+ * sequencing, nor should it be recovered into the on-disk inode at all.
+ * See xlog_recover_inode_commit_pass2() and xfs_log_dinode_to_disk()
+ * for details.
+ */
+ xfs_lsn_t di_lsn;
+
uint64_t di_flags2; /* more random flags */
uint32_t di_cowextsize; /* basic cow extent size for file */
uint8_t di_pad2[12]; /* more padding for future expansion */
diff --git a/fs/xfs/libxfs/xfs_trans_inode.c b/fs/xfs/libxfs/xfs_trans_inode.c
index 8d595a5c4abd..16f723ebe8dd 100644
--- a/fs/xfs/libxfs/xfs_trans_inode.c
+++ b/fs/xfs/libxfs/xfs_trans_inode.c
@@ -143,16 +143,14 @@ xfs_trans_log_inode(
}
/*
- * Inode verifiers on older kernels don't check that the extent size
- * hint is an integer multiple of the rt extent size on a directory
- * with both rtinherit and extszinherit flags set. If we're logging a
- * directory that is misconfigured in this way, clear the hint.
+ * Inode verifiers do not check that the extent size hint is an integer
+ * multiple of the rt extent size on a directory with both rtinherit
+ * and extszinherit flags set. If we're logging a directory that is
+ * misconfigured in this way, clear the hint.
*/
if ((ip->i_diflags & XFS_DIFLAG_RTINHERIT) &&
(ip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) &&
(ip->i_extsize % ip->i_mount->m_sb.sb_rextsize) > 0) {
- xfs_info_once(ip->i_mount,
- "Correcting misaligned extent size hint in inode 0x%llx.", ip->i_ino);
ip->i_diflags &= ~(XFS_DIFLAG_EXTSIZE |
XFS_DIFLAG_EXTSZINHERIT);
ip->i_extsize = 0;
diff --git a/fs/xfs/scrub/inode.c b/fs/xfs/scrub/inode.c
index 61f90b2c9430..76fbc7ca4cec 100644
--- a/fs/xfs/scrub/inode.c
+++ b/fs/xfs/scrub/inode.c
@@ -73,11 +73,25 @@ xchk_inode_extsize(
uint16_t flags)
{
xfs_failaddr_t fa;
+ uint32_t value = be32_to_cpu(dip->di_extsize);
- fa = xfs_inode_validate_extsize(sc->mp, be32_to_cpu(dip->di_extsize),
- mode, flags);
+ fa = xfs_inode_validate_extsize(sc->mp, value, mode, flags);
if (fa)
xchk_ino_set_corrupt(sc, ino);
+
+ /*
+ * XFS allows a sysadmin to change the rt extent size when adding a rt
+ * section to a filesystem after formatting. If there are any
+ * directories with extszinherit and rtinherit set, the hint could
+ * become misaligned with the new rextsize. The verifier doesn't check
+ * this, because we allow rtinherit directories even without an rt
+ * device. Flag this as an administrative warning since we will clean
+ * this up eventually.
+ */
+ if ((flags & XFS_DIFLAG_RTINHERIT) &&
+ (flags & XFS_DIFLAG_EXTSZINHERIT) &&
+ value % sc->mp->m_sb.sb_rextsize > 0)
+ xchk_ino_set_warning(sc, ino);
}
/*
diff --git a/fs/xfs/xfs_buf_item_recover.c b/fs/xfs/xfs_buf_item_recover.c
index d44e8b4a3391..4775485b4062 100644
--- a/fs/xfs/xfs_buf_item_recover.c
+++ b/fs/xfs/xfs_buf_item_recover.c
@@ -698,7 +698,8 @@ xlog_recover_do_inode_buffer(
static xfs_lsn_t
xlog_recover_get_buf_lsn(
struct xfs_mount *mp,
- struct xfs_buf *bp)
+ struct xfs_buf *bp,
+ struct xfs_buf_log_format *buf_f)
{
uint32_t magic32;
uint16_t magic16;
@@ -706,11 +707,20 @@ xlog_recover_get_buf_lsn(
void *blk = bp->b_addr;
uuid_t *uuid;
xfs_lsn_t lsn = -1;
+ uint16_t blft;
/* v4 filesystems always recover immediately */
if (!xfs_sb_version_hascrc(&mp->m_sb))
goto recover_immediately;
+ /*
+ * realtime bitmap and summary file blocks do not have magic numbers or
+ * UUIDs, so we must recover them immediately.
+ */
+ blft = xfs_blft_from_flags(buf_f);
+ if (blft == XFS_BLFT_RTBITMAP_BUF || blft == XFS_BLFT_RTSUMMARY_BUF)
+ goto recover_immediately;
+
magic32 = be32_to_cpu(*(__be32 *)blk);
switch (magic32) {
case XFS_ABTB_CRC_MAGIC:
@@ -796,6 +806,7 @@ xlog_recover_get_buf_lsn(
switch (magicda) {
case XFS_DIR3_LEAF1_MAGIC:
case XFS_DIR3_LEAFN_MAGIC:
+ case XFS_ATTR3_LEAF_MAGIC:
case XFS_DA3_NODE_MAGIC:
lsn = be64_to_cpu(((struct xfs_da3_blkinfo *)blk)->lsn);
uuid = &((struct xfs_da3_blkinfo *)blk)->uuid;
@@ -919,7 +930,7 @@ xlog_recover_buf_commit_pass2(
* the verifier will be reset to match whatever recover turns that
* buffer into.
*/
- lsn = xlog_recover_get_buf_lsn(mp, bp);
+ lsn = xlog_recover_get_buf_lsn(mp, bp, buf_f);
if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
trace_xfs_log_recover_buf_skip(log, buf_f);
xlog_recover_validate_buf_type(mp, bp, buf_f, NULLCOMMITLSN);
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index a835ceb79ba5..990b72ae3635 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -2763,6 +2763,19 @@ xfs_remove(
error = xfs_droplink(tp, ip);
if (error)
goto out_trans_cancel;
+
+ /*
+ * Point the unlinked child directory's ".." entry to the root
+ * directory to eliminate back-references to inodes that may
+ * get freed before the child directory is closed. If the fs
+ * gets shrunk, this can lead to dirent inode validation errors.
+ */
+ if (dp->i_ino != tp->t_mountp->m_sb.sb_rootino) {
+ error = xfs_dir_replace(tp, ip, &xfs_name_dotdot,
+ tp->t_mountp->m_sb.sb_rootino, 0);
+ if (error)
+ return error;
+ }
} else {
/*
* When removing a non-directory we need to log the parent
diff --git a/fs/xfs/xfs_inode_item_recover.c b/fs/xfs/xfs_inode_item_recover.c
index 7b79518b6c20..e0072a6cd2d3 100644
--- a/fs/xfs/xfs_inode_item_recover.c
+++ b/fs/xfs/xfs_inode_item_recover.c
@@ -145,7 +145,8 @@ xfs_log_dinode_to_disk_ts(
STATIC void
xfs_log_dinode_to_disk(
struct xfs_log_dinode *from,
- struct xfs_dinode *to)
+ struct xfs_dinode *to,
+ xfs_lsn_t lsn)
{
to->di_magic = cpu_to_be16(from->di_magic);
to->di_mode = cpu_to_be16(from->di_mode);
@@ -182,7 +183,7 @@ xfs_log_dinode_to_disk(
to->di_flags2 = cpu_to_be64(from->di_flags2);
to->di_cowextsize = cpu_to_be32(from->di_cowextsize);
to->di_ino = cpu_to_be64(from->di_ino);
- to->di_lsn = cpu_to_be64(from->di_lsn);
+ to->di_lsn = cpu_to_be64(lsn);
memcpy(to->di_pad2, from->di_pad2, sizeof(to->di_pad2));
uuid_copy(&to->di_uuid, &from->di_uuid);
to->di_flushiter = 0;
@@ -261,16 +262,25 @@ xlog_recover_inode_commit_pass2(
}
/*
- * If the inode has an LSN in it, recover the inode only if it's less
- * than the lsn of the transaction we are replaying. Note: we still
- * need to replay an owner change even though the inode is more recent
- * than the transaction as there is no guarantee that all the btree
- * blocks are more recent than this transaction, too.
+ * If the inode has an LSN in it, recover the inode only if the on-disk
+ * inode's LSN is older than the lsn of the transaction we are
+ * replaying. We can have multiple checkpoints with the same start LSN,
+ * so the current LSN being equal to the on-disk LSN doesn't necessarily
+ * mean that the on-disk inode is more recent than the change being
+ * replayed.
+ *
+ * We must check the current_lsn against the on-disk inode here
+ * because we can't trust the log dinode to contain a valid LSN
+ * (see comment below before replaying the log dinode for details).
+ *
+ * Note: we still need to replay an owner change even though the inode
+ * is more recent than the transaction as there is no guarantee that all
+ * the btree blocks are more recent than this transaction, too.
*/
if (dip->di_version >= 3) {
xfs_lsn_t lsn = be64_to_cpu(dip->di_lsn);
- if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
+ if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) > 0) {
trace_xfs_log_recover_inode_skip(log, in_f);
error = 0;
goto out_owner_change;
@@ -368,8 +378,17 @@ xlog_recover_inode_commit_pass2(
goto out_release;
}
- /* recover the log dinode inode into the on disk inode */
- xfs_log_dinode_to_disk(ldip, dip);
+ /*
+ * Recover the log dinode inode into the on disk inode.
+ *
+ * The LSN in the log dinode is garbage - it can be zero or reflect
+ * stale in-memory runtime state that isn't coherent with the changes
+ * logged in this transaction or the changes written to the on-disk
+ * inode. Hence we write the current LSN into the inode because that
+ * matches what xfs_iflush() would write into the inode when flushing
+ * the changes in this transaction.
+ */
+ xfs_log_dinode_to_disk(ldip, dip, current_lsn);
fields = in_f->ilf_fields;
if (fields & XFS_ILOG_DEV)
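An illustration of why the comparison was relaxed from >= to > (the LSN values are hypothetical):

/*
 * Checkpoints A and B can share the start LSN 0x100000. If an inode flush
 * after A stamps di_lsn = 0x100000 on disk and we then replay B
 * (current_lsn == 0x100000), the old test
 *	XFS_LSN_CMP(lsn, current_lsn) >= 0
 * skipped the inode even though B's changes never reached it. The new test
 *	XFS_LSN_CMP(lsn, current_lsn) > 0
 * only skips recovery when the on-disk inode is strictly newer, and the
 * garbage di_lsn in the log dinode is never consulted for this decision.
 */
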
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index 65270e63c032..16039ea10ac9 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -1065,7 +1065,24 @@ xfs_fill_fsxattr(
fileattr_fill_xflags(fa, xfs_ip2xflags(ip));
- fa->fsx_extsize = XFS_FSB_TO_B(mp, ip->i_extsize);
+ if (ip->i_diflags & XFS_DIFLAG_EXTSIZE) {
+ fa->fsx_extsize = XFS_FSB_TO_B(mp, ip->i_extsize);
+ } else if (ip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) {
+ /*
+ * Don't let a misaligned extent size hint on a directory
+ * escape to userspace if it won't pass the setattr checks
+ * later.
+ */
+ if ((ip->i_diflags & XFS_DIFLAG_RTINHERIT) &&
+ ip->i_extsize % mp->m_sb.sb_rextsize > 0) {
+ fa->fsx_xflags &= ~(FS_XFLAG_EXTSIZE |
+ FS_XFLAG_EXTSZINHERIT);
+ fa->fsx_extsize = 0;
+ } else {
+ fa->fsx_extsize = XFS_FSB_TO_B(mp, ip->i_extsize);
+ }
+ }
+
if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
fa->fsx_cowextsize = XFS_FSB_TO_B(mp, ip->i_cowextsize);
fa->fsx_projid = ip->i_projid;
@@ -1292,10 +1309,10 @@ xfs_ioctl_setattr_check_extsize(
new_diflags = xfs_flags2diflags(ip, fa->fsx_xflags);
/*
- * Inode verifiers on older kernels don't check that the extent size
- * hint is an integer multiple of the rt extent size on a directory
- * with both rtinherit and extszinherit flags set. Don't let sysadmins
- * misconfigure directories.
+ * Inode verifiers do not check that the extent size hint is an integer
+ * multiple of the rt extent size on a directory with both rtinherit
+ * and extszinherit flags set. Don't let sysadmins misconfigure
+ * directories.
*/
if ((new_diflags & XFS_DIFLAG_RTINHERIT) &&
(new_diflags & XFS_DIFLAG_EXTSZINHERIT)) {
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 36fa2650b081..60ac5fd63f1e 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -78,13 +78,12 @@ xlog_verify_iclog(
STATIC void
xlog_verify_tail_lsn(
struct xlog *log,
- struct xlog_in_core *iclog,
- xfs_lsn_t tail_lsn);
+ struct xlog_in_core *iclog);
#else
#define xlog_verify_dest_ptr(a,b)
#define xlog_verify_grant_tail(a)
#define xlog_verify_iclog(a,b,c)
-#define xlog_verify_tail_lsn(a,b,c)
+#define xlog_verify_tail_lsn(a,b)
#endif
STATIC int
@@ -487,51 +486,80 @@ out_error:
return error;
}
-static bool
-__xlog_state_release_iclog(
- struct xlog *log,
- struct xlog_in_core *iclog)
-{
- lockdep_assert_held(&log->l_icloglock);
-
- if (iclog->ic_state == XLOG_STATE_WANT_SYNC) {
- /* update tail before writing to iclog */
- xfs_lsn_t tail_lsn = xlog_assign_tail_lsn(log->l_mp);
-
- iclog->ic_state = XLOG_STATE_SYNCING;
- iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn);
- xlog_verify_tail_lsn(log, iclog, tail_lsn);
- /* cycle incremented when incrementing curr_block */
- trace_xlog_iclog_syncing(iclog, _RET_IP_);
- return true;
- }
-
- ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
- return false;
-}
-
/*
 * Flush iclog to disk if this is the last reference to the given iclog and
 * it is in the WANT_SYNC state.
+ *
+ * If the caller passes in a non-zero @old_tail_lsn and the current log tail
+ * does not match, there may be metadata on disk that must be persisted before
+ * this iclog is written. To satisfy that requirement, set the
+ * XLOG_ICL_NEED_FLUSH flag as a condition for writing this iclog with the new
+ * log tail value.
+ *
+ * If XLOG_ICL_NEED_FUA is already set on the iclog, we need to ensure that the
+ * log tail is updated correctly. NEED_FUA indicates that the iclog will be
+ * written to stable storage, and implies that a commit record is contained
+ * within the iclog. We need to ensure that the log tail does not move beyond
+ * the tail that the first commit record in the iclog ordered against, otherwise
+ * correct recovery of that checkpoint becomes dependent on future operations
+ * performed on this iclog.
+ *
+ * Hence if NEED_FUA is set and the current iclog tail lsn is empty, write the
+ * current tail into the iclog. Once the iclog tail is set, future operations must
+ * not modify it, otherwise they potentially violate ordering constraints for
+ * the checkpoint commit that wrote the initial tail lsn value. The tail lsn in
+ * the iclog will get zeroed on activation of the iclog after sync, so we
+ * always capture the tail lsn on the iclog on the first NEED_FUA release
+ * regardless of the number of active reference counts on this iclog.
*/
+
int
xlog_state_release_iclog(
struct xlog *log,
- struct xlog_in_core *iclog)
+ struct xlog_in_core *iclog,
+ xfs_lsn_t old_tail_lsn)
{
+ xfs_lsn_t tail_lsn;
lockdep_assert_held(&log->l_icloglock);
trace_xlog_iclog_release(iclog, _RET_IP_);
if (iclog->ic_state == XLOG_STATE_IOERROR)
return -EIO;
- if (atomic_dec_and_test(&iclog->ic_refcnt) &&
- __xlog_state_release_iclog(log, iclog)) {
- spin_unlock(&log->l_icloglock);
- xlog_sync(log, iclog);
- spin_lock(&log->l_icloglock);
+ /*
+ * Grabbing the current log tail needs to be atomic w.r.t. the writing
+ * of the tail LSN into the iclog so we guarantee that the log tail does
+ * not move between deciding if a cache flush is required and writing
+ * the LSN into the iclog below.
+ */
+ if (old_tail_lsn || iclog->ic_state == XLOG_STATE_WANT_SYNC) {
+ tail_lsn = xlog_assign_tail_lsn(log->l_mp);
+
+ if (old_tail_lsn && tail_lsn != old_tail_lsn)
+ iclog->ic_flags |= XLOG_ICL_NEED_FLUSH;
+
+ if ((iclog->ic_flags & XLOG_ICL_NEED_FUA) &&
+ !iclog->ic_header.h_tail_lsn)
+ iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn);
}
+ if (!atomic_dec_and_test(&iclog->ic_refcnt))
+ return 0;
+
+ if (iclog->ic_state != XLOG_STATE_WANT_SYNC) {
+ ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
+ return 0;
+ }
+
+ iclog->ic_state = XLOG_STATE_SYNCING;
+ if (!iclog->ic_header.h_tail_lsn)
+ iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn);
+ xlog_verify_tail_lsn(log, iclog);
+ trace_xlog_iclog_syncing(iclog, _RET_IP_);
+
+ spin_unlock(&log->l_icloglock);
+ xlog_sync(log, iclog);
+ spin_lock(&log->l_icloglock);
return 0;
}
@@ -774,6 +802,21 @@ xfs_log_mount_cancel(
}
/*
+ * Flush out the iclog to disk ensuring that device caches are flushed and
+ * the iclog hits stable storage before any completion waiters are woken.
+ */
+static inline int
+xlog_force_iclog(
+ struct xlog_in_core *iclog)
+{
+ atomic_inc(&iclog->ic_refcnt);
+ iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA;
+ if (iclog->ic_state == XLOG_STATE_ACTIVE)
+ xlog_state_switch_iclogs(iclog->ic_log, iclog, 0);
+ return xlog_state_release_iclog(iclog->ic_log, iclog, 0);
+}
+
+/*
 * Wait for the iclog and all prior iclogs to be written to disk as required by the
* log force state machine. Waiting on ic_force_wait ensures iclog completions
* have been ordered and callbacks run before we are woken here, hence
@@ -827,13 +870,6 @@ xlog_write_unmount_record(
/* account for space used by record data */
ticket->t_curr_res -= sizeof(ulf);
- /*
- * For external log devices, we need to flush the data device cache
- * first to ensure all metadata writeback is on stable storage before we
- * stamp the tail LSN into the unmount record.
- */
- if (log->l_targ != log->l_mp->m_ddev_targp)
- blkdev_issue_flush(log->l_targ->bt_bdev);
return xlog_write(log, &vec, ticket, NULL, NULL, XLOG_UNMOUNT_TRANS);
}
@@ -865,18 +901,7 @@ out_err:
spin_lock(&log->l_icloglock);
iclog = log->l_iclog;
- atomic_inc(&iclog->ic_refcnt);
- if (iclog->ic_state == XLOG_STATE_ACTIVE)
- xlog_state_switch_iclogs(log, iclog, 0);
- else
- ASSERT(iclog->ic_state == XLOG_STATE_WANT_SYNC ||
- iclog->ic_state == XLOG_STATE_IOERROR);
- /*
- * Ensure the journal is fully flushed and on stable storage once the
- * iclog containing the unmount record is written.
- */
- iclog->ic_flags |= (XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA);
- error = xlog_state_release_iclog(log, iclog);
+ error = xlog_force_iclog(iclog);
xlog_wait_on_iclog(iclog);
if (tic) {
@@ -1796,10 +1821,20 @@ xlog_write_iclog(
* metadata writeback and causing priority inversions.
*/
iclog->ic_bio.bi_opf = REQ_OP_WRITE | REQ_META | REQ_SYNC | REQ_IDLE;
- if (iclog->ic_flags & XLOG_ICL_NEED_FLUSH)
+ if (iclog->ic_flags & XLOG_ICL_NEED_FLUSH) {
iclog->ic_bio.bi_opf |= REQ_PREFLUSH;
+ /*
+ * For external log devices, we also need to flush the data
+ * device cache first to ensure all metadata writeback covered
+ * by the LSN in this iclog is on stable storage. This is slow,
+ * but it *must* complete before we issue the external log IO.
+ */
+ if (log->l_targ != log->l_mp->m_ddev_targp)
+ blkdev_issue_flush(log->l_mp->m_ddev_targp->bt_bdev);
+ }
if (iclog->ic_flags & XLOG_ICL_NEED_FUA)
iclog->ic_bio.bi_opf |= REQ_FUA;
+
iclog->ic_flags &= ~(XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA);
if (xlog_map_iclog_data(&iclog->ic_bio, iclog->ic_data, count)) {
@@ -2310,7 +2345,7 @@ xlog_write_copy_finish(
return 0;
release_iclog:
- error = xlog_state_release_iclog(log, iclog);
+ error = xlog_state_release_iclog(log, iclog, 0);
spin_unlock(&log->l_icloglock);
return error;
}
@@ -2529,7 +2564,7 @@ next_lv:
ASSERT(optype & XLOG_COMMIT_TRANS);
*commit_iclog = iclog;
} else {
- error = xlog_state_release_iclog(log, iclog);
+ error = xlog_state_release_iclog(log, iclog, 0);
}
spin_unlock(&log->l_icloglock);
@@ -2567,6 +2602,7 @@ xlog_state_activate_iclog(
memset(iclog->ic_header.h_cycle_data, 0,
sizeof(iclog->ic_header.h_cycle_data));
iclog->ic_header.h_lsn = 0;
+ iclog->ic_header.h_tail_lsn = 0;
}
/*
@@ -2967,7 +3003,7 @@ restart:
* reference to the iclog.
*/
if (!atomic_add_unless(&iclog->ic_refcnt, -1, 1))
- error = xlog_state_release_iclog(log, iclog);
+ error = xlog_state_release_iclog(log, iclog, 0);
spin_unlock(&log->l_icloglock);
if (error)
return error;
@@ -3132,6 +3168,35 @@ xlog_state_switch_iclogs(
}
/*
+ * Force the iclog to disk and check if the iclog has been completed before
+ * xlog_force_iclog() returns. This can happen on synchronous (e.g.
+ * pmem) or fast async storage because we drop the icloglock to issue the IO.
+ * If completion has already occurred, tell the caller so that it can avoid an
+ * unnecessary wait on the iclog.
+ */
+static int
+xlog_force_and_check_iclog(
+ struct xlog_in_core *iclog,
+ bool *completed)
+{
+ xfs_lsn_t lsn = be64_to_cpu(iclog->ic_header.h_lsn);
+ int error;
+
+ *completed = false;
+ error = xlog_force_iclog(iclog);
+ if (error)
+ return error;
+
+ /*
+ * If the iclog has already been completed and reused, the header LSN
+ * will have been rewritten by completion.
+ */
+ if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn)
+ *completed = true;
+ return 0;
+}
+
+/*
* Write out all data in the in-core log as of this exact moment in time.
*
* Data may be written to the in-core log during this call. However,
@@ -3165,7 +3230,6 @@ xfs_log_force(
{
struct xlog *log = mp->m_log;
struct xlog_in_core *iclog;
- xfs_lsn_t lsn;
XFS_STATS_INC(mp, xs_log_force);
trace_xfs_log_force(mp, 0, _RET_IP_);
@@ -3193,39 +3257,33 @@ xfs_log_force(
iclog = iclog->ic_prev;
} else if (iclog->ic_state == XLOG_STATE_ACTIVE) {
if (atomic_read(&iclog->ic_refcnt) == 0) {
- /*
- * We are the only one with access to this iclog.
- *
- * Flush it out now. There should be a roundoff of zero
- * to show that someone has already taken care of the
- * roundoff from the previous sync.
- */
- atomic_inc(&iclog->ic_refcnt);
- lsn = be64_to_cpu(iclog->ic_header.h_lsn);
- xlog_state_switch_iclogs(log, iclog, 0);
- if (xlog_state_release_iclog(log, iclog))
+ /* We have exclusive access to this iclog. */
+ bool completed;
+
+ if (xlog_force_and_check_iclog(iclog, &completed))
goto out_error;
- if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn)
+ if (completed)
goto out_unlock;
} else {
/*
- * Someone else is writing to this iclog.
- *
- * Use its call to flush out the data. However, the
- * other thread may not force out this LR, so we mark
- * it WANT_SYNC.
+ * Someone else is still writing to this iclog, so we
+ * need to ensure that when they release the iclog it
+ * gets synced immediately as we may be waiting on it.
*/
xlog_state_switch_iclogs(log, iclog, 0);
}
- } else {
- /*
- * If the head iclog is not active nor dirty, we just attach
- * ourselves to the head and go to sleep if necessary.
- */
- ;
}
+ /*
+ * The iclog we are about to wait on may contain the checkpoint pushed
+ * by the above xlog_cil_force() call, but it may not have been pushed
+ * to disk yet. Like the ACTIVE case above, we need to make sure caches
+ * are flushed when this iclog is written.
+ */
+ if (iclog->ic_state == XLOG_STATE_WANT_SYNC)
+ iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA;
+
if (flags & XFS_LOG_SYNC)
return xlog_wait_on_iclog(iclog);
out_unlock:
@@ -3245,6 +3303,7 @@ xlog_force_lsn(
bool already_slept)
{
struct xlog_in_core *iclog;
+ bool completed;
spin_lock(&log->l_icloglock);
iclog = log->l_iclog;
@@ -3258,7 +3317,8 @@ xlog_force_lsn(
goto out_unlock;
}
- if (iclog->ic_state == XLOG_STATE_ACTIVE) {
+ switch (iclog->ic_state) {
+ case XLOG_STATE_ACTIVE:
/*
* We sleep here if we haven't already slept (e.g. this is the
* first time we've looked at the correct iclog buf) and the
@@ -3281,12 +3341,31 @@ xlog_force_lsn(
&log->l_icloglock);
return -EAGAIN;
}
- atomic_inc(&iclog->ic_refcnt);
- xlog_state_switch_iclogs(log, iclog, 0);
- if (xlog_state_release_iclog(log, iclog))
+ if (xlog_force_and_check_iclog(iclog, &completed))
goto out_error;
if (log_flushed)
*log_flushed = 1;
+ if (completed)
+ goto out_unlock;
+ break;
+ case XLOG_STATE_WANT_SYNC:
+ /*
+ * This iclog may contain the checkpoint pushed by the
+ * xlog_cil_force_seq() call, but there are other writers still
+ * accessing it so it hasn't been pushed to disk yet. Like the
+ * ACTIVE case above, we need to make sure caches are flushed
+ * when this iclog is written.
+ */
+ iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA;
+ break;
+ default:
+ /*
+ * The entire checkpoint was written by the CIL force and is on
+ * its way to disk already. It will be stable when it
+ * completes, so we don't need to manipulate caches here at all.
+ * We just need to wait for completion if necessary.
+ */
+ break;
}
if (flags & XFS_LOG_SYNC)
@@ -3559,10 +3638,10 @@ xlog_verify_grant_tail(
STATIC void
xlog_verify_tail_lsn(
struct xlog *log,
- struct xlog_in_core *iclog,
- xfs_lsn_t tail_lsn)
+ struct xlog_in_core *iclog)
{
- int blocks;
+ xfs_lsn_t tail_lsn = be64_to_cpu(iclog->ic_header.h_tail_lsn);
+ int blocks;
if (CYCLE_LSN(tail_lsn) == log->l_prev_cycle) {
blocks =
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
index b128aaa9b870..4c44bc3786c0 100644
--- a/fs/xfs/xfs_log_cil.c
+++ b/fs/xfs/xfs_log_cil.c
@@ -654,8 +654,9 @@ xlog_cil_push_work(
struct xfs_trans_header thdr;
struct xfs_log_iovec lhdr;
struct xfs_log_vec lvhdr = { NULL };
+ xfs_lsn_t preflush_tail_lsn;
xfs_lsn_t commit_lsn;
- xfs_lsn_t push_seq;
+ xfs_csn_t push_seq;
struct bio bio;
DECLARE_COMPLETION_ONSTACK(bdev_flush);
@@ -730,7 +731,15 @@ xlog_cil_push_work(
* because we hold the flush lock exclusively. Hence we can now issue
* a cache flush to ensure all the completed metadata in the journal we
* are about to overwrite is on stable storage.
+ *
+ * Because we are issuing this cache flush before we've written the
+ * tail lsn to the iclog, we can have metadata IO completions move the
+ * tail forwards between the completion of this flush and the iclog
+ * being written. In this case, we need to re-issue the cache flush
+ * before the iclog write. To detect whether the log tail moves, sample
+ * the tail LSN *before* we issue the flush.
*/
+ preflush_tail_lsn = atomic64_read(&log->l_tail_lsn);
xfs_flush_bdev_async(&bio, log->l_mp->m_ddev_targp->bt_bdev,
&bdev_flush);
@@ -941,7 +950,7 @@ restart:
* storage.
*/
commit_iclog->ic_flags |= XLOG_ICL_NEED_FUA;
- xlog_state_release_iclog(log, commit_iclog);
+ xlog_state_release_iclog(log, commit_iclog, preflush_tail_lsn);
spin_unlock(&log->l_icloglock);
return;
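Putting the two sides of this change together, the tail-sampling protocol between the CIL push and the iclog release looks roughly like this (a condensed sketch assembled from the hunks above, not verbatim kernel code):

/*
 * CIL push:
 *	preflush_tail_lsn = atomic64_read(&log->l_tail_lsn);
 *	xfs_flush_bdev_async(...);	(flush the data device cache)
 *	... format the checkpoint and write it into iclogs ...
 *	xlog_state_release_iclog(log, commit_iclog, preflush_tail_lsn);
 *
 * Release:
 *	tail_lsn = xlog_assign_tail_lsn(log->l_mp);
 *	if (old_tail_lsn && tail_lsn != old_tail_lsn)
 *		iclog->ic_flags |= XLOG_ICL_NEED_FLUSH;
 *
 * so a tail that moved after the sample forces a fresh cache flush before
 * the iclog write hits the log device.
 */
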
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
index 4c41bbfa33b0..f3e79a45d60a 100644
--- a/fs/xfs/xfs_log_priv.h
+++ b/fs/xfs/xfs_log_priv.h
@@ -59,6 +59,16 @@ enum xlog_iclog_state {
{ XLOG_STATE_DIRTY, "XLOG_STATE_DIRTY" }, \
{ XLOG_STATE_IOERROR, "XLOG_STATE_IOERROR" }
+/*
+ * In core log flags
+ */
+#define XLOG_ICL_NEED_FLUSH (1 << 0) /* iclog needs REQ_PREFLUSH */
+#define XLOG_ICL_NEED_FUA (1 << 1) /* iclog needs REQ_FUA */
+
+#define XLOG_ICL_STRINGS \
+ { XLOG_ICL_NEED_FLUSH, "XLOG_ICL_NEED_FLUSH" }, \
+ { XLOG_ICL_NEED_FUA, "XLOG_ICL_NEED_FUA" }
+
/*
* Log ticket flags
@@ -143,9 +153,6 @@ enum xlog_iclog_state {
#define XLOG_COVER_OPS 5
-#define XLOG_ICL_NEED_FLUSH (1 << 0) /* iclog needs REQ_PREFLUSH */
-#define XLOG_ICL_NEED_FUA (1 << 1) /* iclog needs REQ_FUA */
-
/* Ticket reservation region accounting */
#define XLOG_TIC_LEN_MAX 15
@@ -497,7 +504,8 @@ int xlog_commit_record(struct xlog *log, struct xlog_ticket *ticket,
void xfs_log_ticket_ungrant(struct xlog *log, struct xlog_ticket *ticket);
void xfs_log_ticket_regrant(struct xlog *log, struct xlog_ticket *ticket);
-int xlog_state_release_iclog(struct xlog *log, struct xlog_in_core *iclog);
+int xlog_state_release_iclog(struct xlog *log, struct xlog_in_core *iclog,
+ xfs_lsn_t log_tail_lsn);
/*
* When we crack an atomic LSN, we sample it first so that the value will not
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
index 4e7be6b4ca8e..699066fb9052 100644
--- a/fs/xfs/xfs_rtalloc.c
+++ b/fs/xfs/xfs_rtalloc.c
@@ -923,16 +923,41 @@ xfs_growfs_rt(
uint8_t *rsum_cache; /* old summary cache */
sbp = &mp->m_sb;
- /*
- * Initial error checking.
- */
+
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (mp->m_rtdev_targp == NULL || mp->m_rbmip == NULL ||
- (nrblocks = in->newblocks) <= sbp->sb_rblocks ||
- (sbp->sb_rblocks && (in->extsize != sbp->sb_rextsize)))
+
+ /* Needs to have been mounted with an rt device. */
+ if (!XFS_IS_REALTIME_MOUNT(mp))
+ return -EINVAL;
+ /*
+ * Mount should fail if the rt bitmap/summary files don't load, but
+ * we'll check anyway.
+ */
+ if (!mp->m_rbmip || !mp->m_rsumip)
+ return -EINVAL;
+
+ /* Shrink not supported. */
+ if (in->newblocks <= sbp->sb_rblocks)
+ return -EINVAL;
+
+ /* Can only change rt extent size when adding rt volume. */
+ if (sbp->sb_rblocks > 0 && in->extsize != sbp->sb_rextsize)
+ return -EINVAL;
+
+ /* Range check the extent size. */
+ if (XFS_FSB_TO_B(mp, in->extsize) > XFS_MAX_RTEXTSIZE ||
+ XFS_FSB_TO_B(mp, in->extsize) < XFS_MIN_RTEXTSIZE)
return -EINVAL;
- if ((error = xfs_sb_validate_fsb_count(sbp, nrblocks)))
+
+ /* Unsupported realtime features. */
+ if (xfs_sb_version_hasrmapbt(&mp->m_sb) ||
+ xfs_sb_version_hasreflink(&mp->m_sb))
+ return -EOPNOTSUPP;
+
+ nrblocks = in->newblocks;
+ error = xfs_sb_validate_fsb_count(sbp, nrblocks);
+ if (error)
return error;
/*
* Read in the last block of the device, make sure it exists.
@@ -996,7 +1021,8 @@ xfs_growfs_rt(
((sbp->sb_rextents & ((1 << mp->m_blkbit_log) - 1)) != 0);
bmbno < nrbmblocks;
bmbno++) {
- xfs_trans_t *tp;
+ struct xfs_trans *tp;
+ xfs_rfsblock_t nrblocks_step;
*nmp = *mp;
nsbp = &nmp->m_sb;
@@ -1005,10 +1031,9 @@ xfs_growfs_rt(
*/
nsbp->sb_rextsize = in->extsize;
nsbp->sb_rbmblocks = bmbno + 1;
- nsbp->sb_rblocks =
- XFS_RTMIN(nrblocks,
- nsbp->sb_rbmblocks * NBBY *
- nsbp->sb_blocksize * nsbp->sb_rextsize);
+ nrblocks_step = (bmbno + 1) * NBBY * nsbp->sb_blocksize *
+ nsbp->sb_rextsize;
+ nsbp->sb_rblocks = min(nrblocks, nrblocks_step);
nsbp->sb_rextents = nsbp->sb_rblocks;
do_div(nsbp->sb_rextents, nsbp->sb_rextsize);
ASSERT(nsbp->sb_rextents != 0);
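As a sanity check on the nrblocks_step arithmetic, with assumed geometry (illustrative numbers, not from the patch):

/*
 * With sb_blocksize = 4096 and sb_rextsize = 1, one bitmap block holds
 * NBBY * 4096 = 8 * 4096 = 32768 bits, each tracking one rt extent of one
 * block. Growing through bitmap block bmbno therefore raises sb_rblocks in
 * steps of nrblocks_step = (bmbno + 1) * 32768, capped at the requested
 * device size by min(nrblocks, nrblocks_step).
 */
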
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index f9d8d605f9b1..19260291ff8b 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -3944,6 +3944,7 @@ DECLARE_EVENT_CLASS(xlog_iclog_class,
__field(uint32_t, state)
__field(int32_t, refcount)
__field(uint32_t, offset)
+ __field(uint32_t, flags)
__field(unsigned long long, lsn)
__field(unsigned long, caller_ip)
),
@@ -3952,15 +3953,17 @@ DECLARE_EVENT_CLASS(xlog_iclog_class,
__entry->state = iclog->ic_state;
__entry->refcount = atomic_read(&iclog->ic_refcnt);
__entry->offset = iclog->ic_offset;
+ __entry->flags = iclog->ic_flags;
__entry->lsn = be64_to_cpu(iclog->ic_header.h_lsn);
__entry->caller_ip = caller_ip;
),
- TP_printk("dev %d:%d state %s refcnt %d offset %u lsn 0x%llx caller %pS",
+ TP_printk("dev %d:%d state %s refcnt %d offset %u lsn 0x%llx flags %s caller %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
__print_symbolic(__entry->state, XLOG_STATE_STRINGS),
__entry->refcount,
__entry->offset,
__entry->lsn,
+ __print_flags(__entry->flags, "|", XLOG_ICL_STRINGS),
(char *)__entry->caller_ip)
);