author    Nick Piggin <npiggin@suse.de>  2009-01-06 14:43:09 +1100
committer Lachlan McIlroy <lachlan@redback.melbourne.sgi.com>  2009-01-06 14:43:09 +1100
commit    95f8e302c04c0b0c6de35ab399a5551605eeb006 (patch)
tree      178ea7347f9affb8548786216396eae4970601a5
parent    d2859751cd0bf586941ffa7308635a293f943c17 (diff)
[XFS] use scalable vmap API
Implement XFS's large buffer support with the new vmap APIs. See the
vmap rewrite (db64fe02) for some numbers. The biggest improvement that
comes from using the new APIs is avoiding the global KVA allocation
lock on every call.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Reviewed-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Lachlan McIlroy <lachlan@sgi.com>
-rw-r--r--  fs/xfs/linux-2.6/xfs_buf.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 0b2177a9fbd..d71dc44e21e 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -264,7 +264,7 @@ xfs_buf_free(
 	uint		i;

 	if ((bp->b_flags & XBF_MAPPED) && (bp->b_page_count > 1))
-		vunmap(bp->b_addr - bp->b_offset);
+		vm_unmap_ram(bp->b_addr - bp->b_offset, bp->b_page_count);

 	for (i = 0; i < bp->b_page_count; i++) {
 		struct page	*page = bp->b_pages[i];
@@ -386,8 +386,8 @@ _xfs_buf_map_pages(
 		bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
 		bp->b_flags |= XBF_MAPPED;
 	} else if (flags & XBF_MAPPED) {
-		bp->b_addr = vmap(bp->b_pages, bp->b_page_count,
-					VM_MAP, PAGE_KERNEL);
+		bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
+					-1, PAGE_KERNEL);
		if (unlikely(bp->b_addr == NULL))
			return -ENOMEM;
		bp->b_addr += bp->b_offset;
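
For reference, a minimal sketch of the vm_map_ram()/vm_unmap_ram()
pairing this patch adopts (prototypes as in <linux/vmalloc.h> of this
era; the example_map()/example_unmap() helpers are hypothetical, for
illustration only, and not part of XFS):

#include <linux/mm.h>
#include <linux/vmalloc.h>

/* Map an array of pages into one contiguous kernel virtual range. */
static void *example_map(struct page **pages, unsigned int count)
{
	/*
	 * node == -1 means no NUMA node preference. Unlike vmap(),
	 * small requests are served from per-CPU vmap blocks, so most
	 * calls avoid the global KVA allocation lock.
	 */
	return vm_map_ram(pages, count, -1, PAGE_KERNEL);
}

static void example_unmap(void *addr, unsigned int count)
{
	/* count must match the count passed to vm_map_ram(). */
	vm_unmap_ram(addr, count);
}

Note how xfs_buf_free() now passes bp->b_page_count on the unmap side:
vm_unmap_ram() requires the caller to supply the mapping size, which
vunmap() used to look up itself.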