author     Linus Torvalds <torvalds@linux-foundation.org>  2019-07-12 17:17:51 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2019-07-12 17:17:51 -0700
commit     4ce9d181ebe53abbca5f450b8a2984b8c3a38f26
tree       b563ac755c99ddf430402b2850199fdb625f1f7c /fs/xfs/xfs_aops.c
parent     5010fe9f095414b959fd6fda63986dc90fd0c419
parent     488ca3d8d088ec4658c87aaec6a91e98acccdd54
Merge tag 'xfs-5.3-merge-12' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux
Pull xfs updates from Darrick Wong:
"In this release there are a significant amounts of consolidations and
cleanups in the log code; restructuring of the log to issue struct
bios directly; new bulkstat ioctls to return v5 fs inode information
(and fix all the padding problems of the old ioctl); the beginnings of
multithreaded inode walks (e.g. quotacheck); and a reduction in memory
usage in the online scrub code leading to reduced runtimes.
- Refactor inode geometry calculation into a single structure instead
of open-coding pieces everywhere.
- Add online repair to build options.
- Remove unnecessary function call flags and functions.
- Claim maintainership of various loose xfs documentation and header
files.
- Use struct bio directly for log buffer IOs instead of struct
xfs_buf.
- Reduce log item boilerplate code requirements.
- Merge log item code spread across too many files.
- Further distinguish between log item commits and cancellations.
- Various small cleanups to the ag small allocator.
- Support cgroup-aware writeback (see the bio tagging sketch after this
message).
- libxfs refactoring for mkfs cleanup.
- Remove unneeded #includes.
- Fix a memory allocation miscalculation in the new log bio code.
- Fix bisection problems.
- Fix a crash in ioend processing caused by tripping over freeing of
preallocated transactions.
- Split out a generic inode walk mechanism from the bulkstat code,
hook up all the internal users to use the walking code, then clean
up bulkstat to serve only the bulkstat ioctls.
- Add a multithreaded iwalk implementation to speed up quotacheck on
fast storage with many CPUs.
- Remove unnecessary return values in logging teardown functions.
- Supplement the bstat and inogrp structures with new bulkstat and
inumbers structures that have all the fields we need for v5
filesystem features and none of the padding problems of their
predecessors.
- Wire up new ioctls that use the new structures with a much simpler
bulk_ireq structure at the head instead of the pointerhappy mess we
had before.
- Enable userspace to constrain bulkstat returns to a single AG or a
single special inode so that we can phase out a lot of geometry
guesswork in userspace.
- Reduce memory consumption and zeroing overhead in extended
attribute scrub code.
- Fix some behavioral regressions in the new bulkstat backend code.
- Fix some behavioral regressions in the new log bio code"
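The "Support cgroup-aware writeback" item above shows up in the fs/xfs/xfs_aops.c diff further down as two calls: wbc_init_bio() to associate each writeback bio with the cgroup that dirtied the pages, and wbc_account_io() to charge the written bytes to that cgroup. A minimal sketch of the same pattern outside XFS follows; the function names submit_cgroup_aware_write() and my_end_bio() are hypothetical, the bio is allocated standalone rather than from XFS's private bioset, and error handling (including the bio_add_page() return value) is omitted.

#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/writeback.h>

/* Hypothetical completion handler; real filesystems do their bookkeeping here. */
static void my_end_bio(struct bio *bio)
{
        bio_put(bio);
}

/*
 * Send one writeback bio for @page, attributing the I/O to the cgroup
 * that owns the dirty page.  Sketch only.
 */
static void submit_cgroup_aware_write(struct inode *inode,
                struct writeback_control *wbc, struct page *page,
                struct block_device *bdev, sector_t sector)
{
        struct bio *bio;

        bio = bio_alloc(GFP_NOFS, 1);
        bio_set_dev(bio, bdev);
        bio->bi_iter.bi_sector = sector;
        bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);
        bio->bi_write_hint = inode->i_write_hint;
        bio->bi_end_io = my_end_bio;

        /* Associate the bio with the blkcg of the page's owner. */
        wbc_init_bio(wbc, bio);

        bio_add_page(bio, page, PAGE_SIZE, 0);

        /* Charge the written bytes to the owning cgroup for balancing. */
        wbc_account_io(wbc, page, PAGE_SIZE);

        submit_bio(bio);
}

The design point is that the association has to be made before submit_bio(), so the block layer can attribute and throttle the request against the correct blkcg.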
* tag 'xfs-5.3-merge-12' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux: (100 commits)
xfs: chain bios the right way around in xfs_rw_bdev
xfs: bump INUMBERS cursor correctly in xfs_inumbers_walk
xfs: don't update lastino for FSBULKSTAT_SINGLE
xfs: online scrub needn't bother zeroing its temporary buffer
xfs: only allocate memory for scrubbing attributes when we need it
xfs: refactor attr scrub memory allocation function
xfs: refactor extended attribute buffer pointer functions
xfs: attribute scrub should use seen_enough to pass error values
xfs: allow single bulkstat of special inodes
xfs: specify AG in bulk req
xfs: wire up the v5 inumbers ioctl
xfs: wire up new v5 bulkstat ioctls
xfs: introduce v5 inode group structure
xfs: introduce new v5 bulkstat structure
xfs: rename bulkstat functions
xfs: remove various bulk request typedef usage
fs: xfs: xfs_log: Change return type from int to void
xfs: poll waiting for quotacheck
xfs: multithreaded iwalk implementation
xfs: refactor INUMBERS to use iwalk functions
...
Diffstat (limited to 'fs/xfs/xfs_aops.c')
-rw-r--r--  fs/xfs/xfs_aops.c  121
1 file changed, 65 insertions, 56 deletions
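The largest behavioural change in the diff below is that ioend submission and completion now run under a task-wide NOFS allocation context: memalloc_nofs_save()/memalloc_nofs_restore() replace the narrower per-call-site handling, so any allocation made while pages are held in writeback cannot recurse into filesystem reclaim and deadlock. A minimal sketch of the pattern, assuming a hypothetical do_completion_work() callee:

#include <linux/sched/mm.h>

/* Hypothetical callee that may allocate memory with GFP_KERNEL semantics. */
static void do_completion_work(void *ctx);

static void complete_writeback_work(void *ctx)
{
        unsigned int nofs_flag;

        /*
         * Every allocation between save/restore implicitly loses __GFP_FS,
         * so it cannot wait on the pages we currently hold in writeback.
         */
        nofs_flag = memalloc_nofs_save();

        do_completion_work(ctx);

        memalloc_nofs_restore(nofs_flag);
}

Scoping the flag on the task means every allocation in the section behaves as GFP_NOFS without having to thread gfp flags through each helper.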
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 11f703d4a605..761248ee2778 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -12,16 +12,11 @@
 #include "xfs_mount.h"
 #include "xfs_inode.h"
 #include "xfs_trans.h"
-#include "xfs_inode_item.h"
-#include "xfs_alloc.h"
-#include "xfs_error.h"
 #include "xfs_iomap.h"
 #include "xfs_trace.h"
 #include "xfs_bmap.h"
 #include "xfs_bmap_util.h"
-#include "xfs_bmap_btree.h"
 #include "xfs_reflink.h"
-#include <linux/writeback.h>
 
 /*
  * structure owned by writepages passed to individual writepage calls
@@ -138,8 +133,7 @@ xfs_setfilesize_trans_alloc(
         struct xfs_trans *tp;
         int error;
 
-        error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0,
-                        XFS_TRANS_NOFS, &tp);
+        error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
         if (error)
                 return error;
 
@@ -240,9 +234,17 @@ xfs_end_ioend(
         struct xfs_inode *ip = XFS_I(ioend->io_inode);
         xfs_off_t offset = ioend->io_offset;
         size_t size = ioend->io_size;
+        unsigned int nofs_flag;
         int error;
 
         /*
+         * We can allocate memory here while doing writeback on behalf of
+         * memory reclaim. To avoid memory allocation deadlocks set the
+         * task-wide nofs context for the following operations.
+         */
+        nofs_flag = memalloc_nofs_save();
+
+        /*
          * Just clean up the in-memory strutures if the fs has been shut down.
          */
         if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
@@ -282,6 +284,8 @@ done:
                 list_del_init(&ioend->io_list);
                 xfs_destroy_ioend(ioend, error);
         }
+
+        memalloc_nofs_restore(nofs_flag);
 }
 
 /*
@@ -290,13 +294,9 @@ done:
 static bool
 xfs_ioend_can_merge(
         struct xfs_ioend *ioend,
-        int ioend_error,
         struct xfs_ioend *next)
 {
-        int next_error;
-
-        next_error = blk_status_to_errno(next->io_bio->bi_status);
-        if (ioend_error != next_error)
+        if (ioend->io_bio->bi_status != next->io_bio->bi_status)
                 return false;
         if ((ioend->io_fork == XFS_COW_FORK) ^ (next->io_fork == XFS_COW_FORK))
                 return false;
@@ -305,11 +305,28 @@ xfs_ioend_can_merge(
                 return false;
         if (ioend->io_offset + ioend->io_size != next->io_offset)
                 return false;
-        if (xfs_ioend_is_append(ioend) != xfs_ioend_is_append(next))
-                return false;
         return true;
 }
 
+/*
+ * If the to be merged ioend has a preallocated transaction for file
+ * size updates we need to ensure the ioend it is merged into also
+ * has one. If it already has one we can simply cancel the transaction
+ * as it is guaranteed to be clean.
+ */
+static void
+xfs_ioend_merge_append_transactions(
+        struct xfs_ioend *ioend,
+        struct xfs_ioend *next)
+{
+        if (!ioend->io_append_trans) {
+                ioend->io_append_trans = next->io_append_trans;
+                next->io_append_trans = NULL;
+        } else {
+                xfs_setfilesize_ioend(next, -ECANCELED);
+        }
+}
+
 /* Try to merge adjacent completions. */
 STATIC void
 xfs_ioend_try_merge(
@@ -317,25 +334,16 @@ xfs_ioend_try_merge(
         struct list_head *more_ioends)
 {
         struct xfs_ioend *next_ioend;
-        int ioend_error;
-        int error;
-
-        if (list_empty(more_ioends))
-                return;
-
-        ioend_error = blk_status_to_errno(ioend->io_bio->bi_status);
 
         while (!list_empty(more_ioends)) {
                 next_ioend = list_first_entry(more_ioends, struct xfs_ioend,
                                 io_list);
-                if (!xfs_ioend_can_merge(ioend, ioend_error, next_ioend))
+                if (!xfs_ioend_can_merge(ioend, next_ioend))
                         break;
                 list_move_tail(&next_ioend->io_list, &ioend->io_list);
                 ioend->io_size += next_ioend->io_size;
-                if (ioend->io_append_trans) {
-                        error = xfs_setfilesize_ioend(next_ioend, 1);
-                        ASSERT(error == 1);
-                }
+                if (next_ioend->io_append_trans)
+                        xfs_ioend_merge_append_transactions(ioend, next_ioend);
         }
 }
 
@@ -626,7 +634,7 @@ allocate_blocks:
  * reference to the ioend to ensure that the ioend completion is only done once
  * all bios have been submitted and the ioend is really done.
  *
- * If @fail is non-zero, it means that we have a situation where some part of
+ * If @status is non-zero, it means that we have a situation where some part of
  * the submission process has failed after we have marked paged for writeback
  * and unlocked them. In this situation, we need to fail the bio and ioend
  * rather than submit it to IO. This typically only happens on a filesystem
@@ -638,21 +646,19 @@ xfs_submit_ioend(
         struct xfs_ioend *ioend,
         int status)
 {
+        unsigned int nofs_flag;
+
+        /*
+         * We can allocate memory here while doing writeback on behalf of
+         * memory reclaim. To avoid memory allocation deadlocks set the
+         * task-wide nofs context for the following operations.
+         */
+        nofs_flag = memalloc_nofs_save();
+
         /* Convert CoW extents to regular */
         if (!status && ioend->io_fork == XFS_COW_FORK) {
-                /*
-                 * Yuk. This can do memory allocation, but is not a
-                 * transactional operation so everything is done in GFP_KERNEL
-                 * context. That can deadlock, because we hold pages in
-                 * writeback state and GFP_KERNEL allocations can block on them.
-                 * Hence we must operate in nofs conditions here.
-                 */
-                unsigned nofs_flag;
-
-                nofs_flag = memalloc_nofs_save();
                 status = xfs_reflink_convert_cow(XFS_I(ioend->io_inode),
                                 ioend->io_offset, ioend->io_size);
-                memalloc_nofs_restore(nofs_flag);
         }
 
         /* Reserve log space if we might write beyond the on-disk inode size. */
@@ -663,9 +669,10 @@ xfs_submit_ioend(
             !ioend->io_append_trans)
                 status = xfs_setfilesize_trans_alloc(ioend);
 
+        memalloc_nofs_restore(nofs_flag);
+
         ioend->io_bio->bi_private = ioend;
         ioend->io_bio->bi_end_io = xfs_end_bio;
-        ioend->io_bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);
 
         /*
          * If we are failing the IO now, just mark the ioend with an
@@ -679,7 +686,6 @@ xfs_submit_ioend(
                 return status;
         }
 
-        ioend->io_bio->bi_write_hint = ioend->io_inode->i_write_hint;
         submit_bio(ioend->io_bio);
         return 0;
 }
@@ -691,7 +697,8 @@ xfs_alloc_ioend(
         xfs_exntst_t state,
         xfs_off_t offset,
         struct block_device *bdev,
-        sector_t sector)
+        sector_t sector,
+        struct writeback_control *wbc)
 {
         struct xfs_ioend *ioend;
         struct bio *bio;
@@ -699,6 +706,9 @@ xfs_alloc_ioend(
         bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, &xfs_ioend_bioset);
         bio_set_dev(bio, bdev);
         bio->bi_iter.bi_sector = sector;
+        bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);
+        bio->bi_write_hint = inode->i_write_hint;
+        wbc_init_bio(wbc, bio);
 
         ioend = container_of(bio, struct xfs_ioend, io_inline_bio);
         INIT_LIST_HEAD(&ioend->io_list);
@@ -719,24 +729,22 @@ xfs_alloc_ioend(
  * so that the bi_private linkage is set up in the right direction for the
  * traversal in xfs_destroy_ioend().
  */
-static void
+static struct bio *
 xfs_chain_bio(
-        struct xfs_ioend *ioend,
-        struct writeback_control *wbc,
-        struct block_device *bdev,
-        sector_t sector)
+        struct bio *prev)
 {
         struct bio *new;
 
         new = bio_alloc(GFP_NOFS, BIO_MAX_PAGES);
-        bio_set_dev(new, bdev);
-        new->bi_iter.bi_sector = sector;
-        bio_chain(ioend->io_bio, new);
-        bio_get(ioend->io_bio); /* for xfs_destroy_ioend */
-        ioend->io_bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);
-        ioend->io_bio->bi_write_hint = ioend->io_inode->i_write_hint;
-        submit_bio(ioend->io_bio);
-        ioend->io_bio = new;
+        bio_copy_dev(new, prev);/* also copies over blkcg information */
+        new->bi_iter.bi_sector = bio_end_sector(prev);
+        new->bi_opf = prev->bi_opf;
+        new->bi_write_hint = prev->bi_write_hint;
+
+        bio_chain(prev, new);
+        bio_get(prev); /* for xfs_destroy_ioend */
+        submit_bio(prev);
+        return new;
 }
 
 /*
@@ -772,7 +780,7 @@ xfs_add_to_ioend(
                 if (wpc->ioend)
                         list_add(&wpc->ioend->io_list, iolist);
                 wpc->ioend = xfs_alloc_ioend(inode, wpc->fork,
-                                wpc->imap.br_state, offset, bdev, sector);
+                                wpc->imap.br_state, offset, bdev, sector, wbc);
         }
 
         merged = __bio_try_merge_page(wpc->ioend->io_bio, page, len, poff,
@@ -783,11 +791,12 @@ xfs_add_to_ioend(
 
         if (!merged) {
                 if (bio_full(wpc->ioend->io_bio, len))
-                        xfs_chain_bio(wpc->ioend, wbc, bdev, sector);
+                        wpc->ioend->io_bio = xfs_chain_bio(wpc->ioend->io_bio);
                 bio_add_page(wpc->ioend->io_bio, page, len, poff);
         }
 
         wpc->ioend->io_size += len;
+        wbc_account_io(wbc, page, len);
 }
 
 STATIC void
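For readers following the xfs_chain_bio() rework above: when the current bio fills up, a new bio is allocated that inherits the device, blkcg association, position and flags of the old one; the old bio is then chained to the new bio, so the new bio cannot complete before the old one, and submitted immediately. A condensed restatement of that hunk, with the xfs_ioend bookkeeping left out and chain_and_submit() as a placeholder name:

#include <linux/bio.h>

/*
 * Allocate a continuation bio for @prev and submit @prev.  The returned bio
 * starts where @prev ended and, thanks to bio_chain(), will not signal
 * completion until @prev has completed.
 */
static struct bio *chain_and_submit(struct bio *prev)
{
        struct bio *new;

        new = bio_alloc(GFP_NOFS, BIO_MAX_PAGES);
        bio_copy_dev(new, prev);                /* also copies blkcg association */
        new->bi_iter.bi_sector = bio_end_sector(prev);
        new->bi_opf = prev->bi_opf;
        new->bi_write_hint = prev->bi_write_hint;

        bio_chain(prev, new);
        bio_get(prev);          /* keep @prev alive for the caller's teardown */
        submit_bio(prev);
        return new;
}

The extra bio_get() mirrors the original code: the caller keeps a reference to the submitted bio so its teardown path can still walk the chain, exactly as the "for xfs_destroy_ioend" comment in the hunk notes.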