author		Tejun Heo <tj@kernel.org>	2015-05-22 17:13:51 -0400
committer	Jens Axboe <axboe@fb.com>	2015-06-02 08:33:36 -0600
commit		c00ddad39f512b1a81e25b7892217ce10efab0f1
tree		bc569603046deab04c214b9ff88b799ae094bc58 /fs
parent		ebe41ab0c79d5633123f6faa3265a1a63c5f22d8
writeback: remove bdi_start_writeback()
bdi_start_writeback() is a thin wrapper on top of
__wb_start_writeback() which is used only by laptop_mode_timer_fn().
This patch removes bdi_start_writeback(), renames
__wb_start_writeback() to wb_start_writeback() and makes
laptop_mode_timer_fn() use it instead.
This doesn't cause any functional difference and will ease making
laptop_mode_timer_fn() cgroup writeback aware.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Jan Kara <jack@suse.cz>
Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'fs')
-rw-r--r--	fs/fs-writeback.c	68
1 file changed, 25 insertions, 43 deletions
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 921a9e43b1db..79f11af67357 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -184,33 +184,6 @@ out_unlock:
 	spin_unlock_bh(&wb->work_lock);
 }
 
-static void __wb_start_writeback(struct bdi_writeback *wb, long nr_pages,
-				 bool range_cyclic, enum wb_reason reason)
-{
-	struct wb_writeback_work *work;
-
-	if (!wb_has_dirty_io(wb))
-		return;
-
-	/*
-	 * This is WB_SYNC_NONE writeback, so if allocation fails just
-	 * wakeup the thread for old dirty data writeback
-	 */
-	work = kzalloc(sizeof(*work), GFP_ATOMIC);
-	if (!work) {
-		trace_writeback_nowork(wb->bdi);
-		wb_wakeup(wb);
-		return;
-	}
-
-	work->sync_mode = WB_SYNC_NONE;
-	work->nr_pages = nr_pages;
-	work->range_cyclic = range_cyclic;
-	work->reason = reason;
-
-	wb_queue_work(wb, work);
-}
-
 #ifdef CONFIG_CGROUP_WRITEBACK
 
 /**
@@ -240,22 +213,31 @@ EXPORT_SYMBOL_GPL(inode_congested);
 
 #endif	/* CONFIG_CGROUP_WRITEBACK */
 
-/**
- * bdi_start_writeback - start writeback
- * @bdi: the backing device to write from
- * @nr_pages: the number of pages to write
- * @reason: reason why some writeback work was initiated
- *
- * Description:
- *   This does WB_SYNC_NONE opportunistic writeback. The IO is only
- *   started when this function returns, we make no guarantees on
- *   completion. Caller need not hold sb s_umount semaphore.
- *
- */
-void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
-			 enum wb_reason reason)
+void wb_start_writeback(struct bdi_writeback *wb, long nr_pages,
+			bool range_cyclic, enum wb_reason reason)
 {
-	__wb_start_writeback(&bdi->wb, nr_pages, true, reason);
+	struct wb_writeback_work *work;
+
+	if (!wb_has_dirty_io(wb))
+		return;
+
+	/*
+	 * This is WB_SYNC_NONE writeback, so if allocation fails just
+	 * wakeup the thread for old dirty data writeback
+	 */
+	work = kzalloc(sizeof(*work), GFP_ATOMIC);
+	if (!work) {
+		trace_writeback_nowork(wb->bdi);
+		wb_wakeup(wb);
+		return;
+	}
+
+	work->sync_mode = WB_SYNC_NONE;
+	work->nr_pages = nr_pages;
+	work->range_cyclic = range_cyclic;
+	work->reason = reason;
+
+	wb_queue_work(wb, work);
 }
 
 /**
@@ -1219,7 +1201,7 @@ void wakeup_flusher_threads(long nr_pages, enum wb_reason reason)
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list)
-		__wb_start_writeback(&bdi->wb, nr_pages, false, reason);
+		wb_start_writeback(&bdi->wb, nr_pages, false, reason);
 	rcu_read_unlock();
 }
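Note: because the diffstat above is limited to 'fs', the caller-side change in mm/page-writeback.c does not appear here. A minimal sketch of what the commit message describes for laptop_mode_timer_fn() is below; the bdi_has_dirty_io() guard and the WB_REASON_LAPTOP_TIMER constant are reconstructed for illustration, not quoted from this diff. The range_cyclic argument is true, matching what the removed bdi_start_writeback() wrapper passed.

/* Sketch, not the verbatim mm/page-writeback.c hunk: the timer callback
 * now queues WB_SYNC_NONE writeback via the renamed helper directly. */
void laptop_mode_timer_fn(unsigned long data)
{
	struct request_queue *q = (struct request_queue *)data;
	int nr_pages = global_page_state(NR_FILE_DIRTY) +
			global_page_state(NR_UNSTABLE_NFS);

	/*
	 * Write everything out, not just down to the dirty threshold;
	 * range_cyclic == true mirrors the removed wrapper's behavior.
	 */
	if (bdi_has_dirty_io(&q->backing_dev_info))
		wb_start_writeback(&q->backing_dev_info.wb, nr_pages, true,
				   WB_REASON_LAPTOP_TIMER);
}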