diff options
author | Dave Chinner <dchinner@redhat.com> | 2021-06-07 11:50:48 +1000 |
---|---|---|
committer | Dave Chinner <david@fromorbit.com> | 2021-06-07 11:50:48 +1000 |
commit | 8bcac7448a942fa4662441a310c97d47cec24310 (patch) | |
tree | 455fa42b8ecd59b820ed30e4f76266b66306e123 | |
parent | 170041f71596dad3f34dea40ee0ef0c848d3f906 (diff) |
xfs: merge xfs_buf_allocate_memory
It has only one caller and is now a simple function, so merge it
into the caller.
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
-rw-r--r-- | fs/xfs/xfs_buf.c | 44 |
1 file changed, 13 insertions(+), 31 deletions(-)
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c index 7dea73535959..a55471612150 100644 --- a/fs/xfs/xfs_buf.c +++ b/fs/xfs/xfs_buf.c @@ -313,11 +313,11 @@ xfs_buf_free( static int xfs_buf_alloc_kmem( struct xfs_buf *bp, - size_t size, xfs_buf_flags_t flags) { int align_mask = xfs_buftarg_dma_alignment(bp->b_target); xfs_km_flags_t kmflag_mask = KM_NOFS; + size_t size = BBTOB(bp->b_length); /* Assure zeroed buffer for non-read cases. */ if (!(flags & XBF_READ)) @@ -400,33 +400,6 @@ xfs_buf_alloc_pages( return 0; } - -/* - * Allocates all the pages for buffer in question and builds it's page list. - */ -static int -xfs_buf_allocate_memory( - struct xfs_buf *bp, - uint flags) -{ - size_t size; - int error; - - /* - * For buffers that fit entirely within a single page, first attempt to - * allocate the memory from the heap to minimise memory usage. If we - * can't get heap memory for these small buffers, we fall back to using - * the page allocator. - */ - size = BBTOB(bp->b_length); - if (size < PAGE_SIZE) { - error = xfs_buf_alloc_kmem(bp, size, flags); - if (!error) - return 0; - } - return xfs_buf_alloc_pages(bp, flags); -} - /* * Map buffer into kernel address-space if necessary. */ @@ -688,9 +661,18 @@ xfs_buf_get_map( if (error) return error; - error = xfs_buf_allocate_memory(new_bp, flags); - if (error) - goto out_free_buf; + /* + * For buffers that fit entirely within a single page, first attempt to + * allocate the memory from the heap to minimise memory usage. If we + * can't get heap memory for these small buffers, we fall back to using + * the page allocator. + */ + if (BBTOB(new_bp->b_length) >= PAGE_SIZE || + xfs_buf_alloc_kmem(new_bp, flags) < 0) { + error = xfs_buf_alloc_pages(new_bp, flags); + if (error) + goto out_free_buf; + } error = xfs_buf_find(target, map, nmaps, flags, new_bp, &bp); if (error) |