author	Trond Myklebust <Trond.Myklebust@netapp.com>	2006-10-09 16:18:38 -0400
committer	Trond Myklebust <Trond.Myklebust@netapp.com>	2006-12-06 10:46:35 -0500
commit	1c75950b9a2254ef08f986e00ad20266ae9ba7f1 (patch)
tree	cc1a242601b27b8128c8c385f1b858a7b863f155 /fs/nfs
parent	3f442547b76bf9fb70d7aecc41cf1980459253c9 (diff)
NFS: cleanup of nfs_sync_inode_wait()
Allow callers to directly pass it a struct writeback_control.
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
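
[Editor's illustration, not part of the commit: with this change a caller no longer passes an index/page-count pair to nfs_sync_inode_wait(); it fills in a struct writeback_control and hands it to nfs_sync_mapping_wait(), as the nfs_sync_mapping_range() helper added in this patch does. A rough sketch of the new calling convention, using only fields that appear in the patch below:]

	/* Sketch only; mirrors the nfs_sync_mapping_range() helper introduced by this patch */
	struct writeback_control wbc = {
		.bdi = mapping->backing_dev_info,	/* writeback goes through the mapping's backing device */
		.sync_mode = WB_SYNC_ALL,		/* wait for all requests, not just start them */
		.nr_to_write = LONG_MAX,
		.range_start = range_start,		/* byte range replaces the old idx_start/npages pair */
		.range_end = range_end,
	};

	ret = nfs_sync_mapping_wait(mapping, &wbc, how);	/* 'how' still carries the FLUSH_* flags */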
Diffstat (limited to 'fs/nfs')
-rw-r--r--	fs/nfs/file.c	 9
-rw-r--r--	fs/nfs/inode.c	 2
-rw-r--r--	fs/nfs/write.c	93
3 files changed, 88 insertions, 16 deletions
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index cc93865cea93..d6ee60fc3ba6 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -307,11 +307,14 @@ static int nfs_commit_write(struct file *file, struct page *page, unsigned offse
 
 static void nfs_invalidate_page(struct page *page, unsigned long offset)
 {
-	struct inode *inode = page->mapping->host;
+	loff_t range_start, range_end;
 
+	if (offset != 0)
+		return;
 	/* Cancel any unstarted writes on this page */
-	if (offset == 0)
-		nfs_sync_inode_wait(inode, page->index, 1, FLUSH_INVALIDATE);
+	range_start = page_offset(page);
+	range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
+	nfs_sync_mapping_range(page->mapping, range_start, range_end, FLUSH_INVALIDATE);
 }
 
 static int nfs_release_page(struct page *page, gfp_t gfp)
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 08cc4c5919ab..7c32187f953e 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -422,7 +422,7 @@ int nfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
 	int err;
 
 	/* Flush out writes to the server in order to update c/mtime */
-	nfs_sync_inode_wait(inode, 0, 0, FLUSH_NOCOMMIT);
+	nfs_sync_mapping_range(inode->i_mapping, 0, 0, FLUSH_NOCOMMIT);
 
 	/*
 	 * We may force a getattr if the user cares about atime.
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index dbc89fa7e9d5..310fdeca6250 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -80,6 +80,7 @@ static struct nfs_page * nfs_update_request(struct nfs_open_context*,
 static int nfs_wait_on_write_congestion(struct address_space *, int);
 static int nfs_wait_on_requests(struct inode *, unsigned long, unsigned int);
 static long nfs_flush_mapping(struct address_space *mapping, struct writeback_control *wbc, int how);
+static int nfs_wb_page_priority(struct inode *inode, struct page *page, int how);
 static const struct rpc_call_ops nfs_write_partial_ops;
 static const struct rpc_call_ops nfs_write_full_ops;
 static const struct rpc_call_ops nfs_commit_ops;
@@ -1476,29 +1477,38 @@ int nfs_commit_inode(struct inode *inode, int how)
 }
 #endif
 
-long nfs_sync_inode_wait(struct inode *inode, unsigned long idx_start,
-		unsigned int npages, int how)
+long nfs_sync_mapping_wait(struct address_space *mapping, struct writeback_control *wbc, int how)
 {
+	struct inode *inode = mapping->host;
 	struct nfs_inode *nfsi = NFS_I(inode);
-	struct address_space *mapping = inode->i_mapping;
-	struct writeback_control wbc = {
-		.bdi = mapping->backing_dev_info,
-		.sync_mode = WB_SYNC_ALL,
-		.nr_to_write = LONG_MAX,
-		.range_start = ((loff_t)idx_start) << PAGE_CACHE_SHIFT,
-		.range_end = ((loff_t)(idx_start + npages - 1)) << PAGE_CACHE_SHIFT,
-	};
+	unsigned long idx_start, idx_end;
+	unsigned int npages = 0;
 	LIST_HEAD(head);
 	int nocommit = how & FLUSH_NOCOMMIT;
 	long pages, ret;
 
+	/* FIXME */
+	if (wbc->range_cyclic)
+		idx_start = 0;
+	else {
+		idx_start = wbc->range_start >> PAGE_CACHE_SHIFT;
+		idx_end = wbc->range_end >> PAGE_CACHE_SHIFT;
+		if (idx_end > idx_start) {
+			unsigned long l_npages = 1 + idx_end - idx_start;
+			npages = l_npages;
+			if (sizeof(npages) != sizeof(l_npages) &&
+					(unsigned long)npages != l_npages)
+				npages = 0;
+		}
+	}
 	how &= ~FLUSH_NOCOMMIT;
 	spin_lock(&nfsi->req_lock);
 	do {
+		wbc->pages_skipped = 0;
 		ret = nfs_wait_on_requests_locked(inode, idx_start, npages);
 		if (ret != 0)
 			continue;
-		pages = nfs_scan_dirty(mapping, &wbc, &head);
+		pages = nfs_scan_dirty(mapping, wbc, &head);
 		if (pages != 0) {
 			spin_unlock(&nfsi->req_lock);
 			if (how & FLUSH_INVALIDATE) {
@@ -1509,11 +1519,16 @@ long nfs_sync_inode_wait(struct inode *inode, unsigned long idx_start,
 			spin_lock(&nfsi->req_lock);
 			continue;
 		}
+		if (wbc->pages_skipped != 0)
+			continue;
 		if (nocommit)
 			break;
 		pages = nfs_scan_commit(inode, &head, idx_start, npages);
-		if (pages == 0)
+		if (pages == 0) {
+			if (wbc->pages_skipped != 0)
+				continue;
 			break;
+		}
 		if (how & FLUSH_INVALIDATE) {
 			spin_unlock(&nfsi->req_lock);
 			nfs_cancel_commit_list(&head);
@@ -1530,6 +1545,60 @@ long nfs_sync_inode_wait(struct inode *inode, unsigned long idx_start,
 	return ret;
 }
 
+/*
+ * flush the inode to disk.
+ */
+int nfs_wb_all(struct inode *inode)
+{
+	struct address_space *mapping = inode->i_mapping;
+	struct writeback_control wbc = {
+		.bdi = mapping->backing_dev_info,
+		.sync_mode = WB_SYNC_ALL,
+		.nr_to_write = LONG_MAX,
+		.range_cyclic = 1,
+	};
+	int ret;
+
+	ret = nfs_sync_mapping_wait(mapping, &wbc, 0);
+	if (ret >= 0)
+		return 0;
+	return ret;
+}
+
+int nfs_sync_mapping_range(struct address_space *mapping, loff_t range_start, loff_t range_end, int how)
+{
+	struct writeback_control wbc = {
+		.bdi = mapping->backing_dev_info,
+		.sync_mode = WB_SYNC_ALL,
+		.nr_to_write = LONG_MAX,
+		.range_start = range_start,
+		.range_end = range_end,
+	};
+	int ret;
+
+	ret = nfs_sync_mapping_wait(mapping, &wbc, how);
+	if (ret >= 0)
+		return 0;
+	return ret;
+}
+
+static int nfs_wb_page_priority(struct inode *inode, struct page *page, int how)
+{
+	loff_t range_start = page_offset(page);
+	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
+
+	return nfs_sync_mapping_range(inode->i_mapping, range_start, range_end, how | FLUSH_STABLE);
+}
+
+/*
+ * Write back all requests on one page - we do this before reading it.
+ */
+int nfs_wb_page(struct inode *inode, struct page* page)
+{
+	return nfs_wb_page_priority(inode, page, 0);
+}
+
+
 int __init nfs_init_writepagecache(void)
 {
 	nfs_wdata_cachep = kmem_cache_create("nfs_write_data",