author | Dave Chinner <dchinner@redhat.com> | 2021-06-01 13:40:36 +1000
committer | Dave Chinner <david@fromorbit.com> | 2021-06-01 13:40:36 +1000
commit | 02c5117386884e06b6e78b72288f1e0af4320dc1 (patch) |
tree | c03f3d5f2477d8f359a799b8001e2e8ee54bc0d7 /fs/xfs/xfs_buf.c |
parent | c9fa563072e13337713a441cf30171feb4e96e6d (diff) |
xfs: merge _xfs_buf_get_pages()
_xfs_buf_get_pages() is only called from one place now, so merge it into
xfs_buf_alloc_pages(). Because page array allocation depends on
bp->b_pages being NULL, ensure that bp->b_pages is always set back to
NULL when the page array is freed.
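
To make that invariant concrete, here is a minimal userspace sketch of the pattern this change relies on. It is not the kernel code itself: buf_pages, BUF_INLINE_PAGES and the helper names are illustrative stand-ins for struct xfs_buf, XB_PAGES and the buffer cache routines.

```c
/*
 * Userspace sketch only: models the "inline array or heap array, and the
 * pointer is always NULLed on free" invariant. Names are illustrative,
 * not the kernel's.
 */
#include <stdlib.h>
#include <string.h>

#define BUF_INLINE_PAGES	4

struct buf_pages {
	void	**pages;				/* current page array */
	void	*inline_pages[BUF_INLINE_PAGES];	/* built-in small array */
	int	page_count;
};

static int buf_alloc_page_array(struct buf_pages *bp, int page_count)
{
	/* bp->pages is assumed NULL here; the free path guarantees it. */
	bp->page_count = page_count;
	if (page_count <= BUF_INLINE_PAGES) {
		bp->pages = bp->inline_pages;
		memset(bp->pages, 0, sizeof(void *) * page_count);
	} else {
		bp->pages = calloc(page_count, sizeof(void *));
		if (!bp->pages)
			return -1;
	}
	return 0;
}

static void buf_free_page_array(struct buf_pages *bp)
{
	/* Only heap-allocated arrays are freed... */
	if (bp->pages != bp->inline_pages)
		free(bp->pages);
	/* ...but the pointer is cleared unconditionally. */
	bp->pages = NULL;
}

int main(void)
{
	struct buf_pages bp = { 0 };

	if (buf_alloc_page_array(&bp, 16) == 0)
		buf_free_page_array(&bp);	/* bp.pages is NULL again */
	return 0;
}
```

Clearing the pointer on every free is what lets the allocation side remain a simple "inline or heap" decision with no stale-pointer handling.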
Also convert the page array allocation from kmem_alloc() to kzalloc()
so we can pass the gfp flags we have already calculated for the
allocation context instead of hard-coding KM_NOFS semantics.
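
For context, the fragment below is a kernel-style sketch, not the actual xfs_buf_alloc_pages() body, of what "gfp flags we've already calculated" means: the caller derives one mask from the allocation context and the page-array allocation reuses it, whereas kmem_alloc(..., KM_NOFS) always forced NOFS semantics. The readahead/NOFS split and the helper name here are assumptions for illustration.

```c
/* Kernel-style sketch only; field names follow the patch, the rest is assumed. */
static int alloc_page_array_sketch(struct xfs_buf *bp, int page_count,
				   bool readahead)
{
	gfp_t	gfp_mask = __GFP_NOWARN;

	if (readahead)
		gfp_mask |= __GFP_NORETRY;	/* readahead may fail cheaply */
	else
		gfp_mask |= GFP_NOFS;		/* avoid recursing into fs reclaim */

	bp->b_page_count = page_count;
	if (page_count <= XB_PAGES) {
		bp->b_pages = bp->b_page_array;
	} else {
		/* kzalloc() zeroes the array and honours the caller's mask */
		bp->b_pages = kzalloc(sizeof(struct page *) * page_count,
				      gfp_mask);
		if (!bp->b_pages)
			return -ENOMEM;
	}
	return 0;
}
```

The gain over the old code is that contexts wanting weaker semantics (for example readahead using __GFP_NORETRY) are no longer overridden by a hard-coded KM_NOFS on the array allocation.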
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Diffstat (limited to 'fs/xfs/xfs_buf.c')
-rw-r--r-- | fs/xfs/xfs_buf.c | 48
1 file changed, 14 insertions, 34 deletions
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 2749bc0bf726..a6fcd829c1ea 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -273,41 +273,15 @@ _xfs_buf_alloc(
 }
 
 /*
- * Allocate a page array capable of holding a specified number
- * of pages, and point the page buf at it.
- */
-STATIC int
-_xfs_buf_get_pages(
-	struct xfs_buf	*bp,
-	int		page_count)
-{
-	/* Make sure that we have a page list */
-	if (bp->b_pages == NULL) {
-		bp->b_page_count = page_count;
-		if (page_count <= XB_PAGES) {
-			bp->b_pages = bp->b_page_array;
-		} else {
-			bp->b_pages = kmem_alloc(sizeof(struct page *) *
-					page_count, KM_NOFS);
-			if (bp->b_pages == NULL)
-				return -ENOMEM;
-		}
-		memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
-	}
-	return 0;
-}
-
-/*
  * Frees b_pages if it was allocated.
  */
 STATIC void
 _xfs_buf_free_pages(
 	struct xfs_buf	*bp)
 {
-	if (bp->b_pages != bp->b_page_array) {
+	if (bp->b_pages != bp->b_page_array)
 		kmem_free(bp->b_pages);
-		bp->b_pages = NULL;
-	}
+	bp->b_pages = NULL;
 }
 
 /*
@@ -389,16 +363,22 @@ xfs_buf_alloc_pages(
 	long		filled = 0;
 	int		error;
 
+	/* Make sure that we have a page list */
+	bp->b_page_count = page_count;
+	if (bp->b_page_count <= XB_PAGES) {
+		bp->b_pages = bp->b_page_array;
+	} else {
+		bp->b_pages = kzalloc(sizeof(struct page *) * bp->b_page_count,
+					gfp_mask);
+		if (!bp->b_pages)
+			return -ENOMEM;
+	}
+	bp->b_flags |= _XBF_PAGES;
+
 	/* Assure zeroed buffer for non-read cases. */
 	if (!(flags & XBF_READ))
 		gfp_mask |= __GFP_ZERO;
 
-	error = _xfs_buf_get_pages(bp, page_count);
-	if (unlikely(error))
-		return error;
-
-	bp->b_flags |= _XBF_PAGES;
-
 	/*
 	 * Bulk filling of pages can take multiple calls. Not filling the entire
 	 * array is not an allocation failure, so don't back off if we get at