path: root/drivers/md/bcache/writeback.c
author    Davidlohr Bueso <dave@stgolabs.net>  2020-03-22 14:03:04 +0800
committer Jens Axboe <axboe@kernel.dk>         2020-03-22 10:06:57 -0600
commit    b004aa867c48b3232835b61ed9d44b572e29498e (patch)
tree      c4e27aff03ea6deada3f73eb5e36d3715d541908 /drivers/md/bcache/writeback.c
parent    9876e38609a8ea98bbb447eb5a8f1c0400a6ccb8 (diff)
bcache: optimize barrier usage for Rmw atomic bitops
We can avoid the unnecessary barrier on non-LL/SC architectures, such as
x86. Instead, use smp_mb__after_atomic().

Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Signed-off-by: Coly Li <colyli@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'drivers/md/bcache/writeback.c')
-rw-r--r--  drivers/md/bcache/writeback.c  6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 6673a37c8bd2..72ba6d015786 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -183,7 +183,7 @@ static void update_writeback_rate(struct work_struct *work)
*/
set_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
/* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */
- smp_mb();
+ smp_mb__after_atomic();
/*
* CACHE_SET_IO_DISABLE might be set via sysfs interface,
@@ -193,7 +193,7 @@ static void update_writeback_rate(struct work_struct *work)
test_bit(CACHE_SET_IO_DISABLE, &c->flags)) {
clear_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
/* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */
- smp_mb();
+ smp_mb__after_atomic();
return;
}
@@ -229,7 +229,7 @@ static void update_writeback_rate(struct work_struct *work)
*/
clear_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
/* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */
- smp_mb();
+ smp_mb__after_atomic();
}
static unsigned int writeback_delay(struct cached_dev *dc,
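For readers less familiar with the pattern: below is a minimal, hypothetical sketch (the bit name, flag word, and helper functions are illustrative, not taken from bcache) of the pairing this patch optimizes. set_bit()/clear_bit() are RMW atomic bitops; smp_mb__after_atomic() still provides the full ordering that the old smp_mb() gave, but it reduces to a compiler barrier on strongly ordered architectures such as x86, where the locked RMW instruction already implies a full barrier, and only emits a real memory barrier on LL/SC architectures.

/* Hypothetical sketch -- not part of the patch or of bcache. */
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>

#define EXAMPLE_RUNNING_BIT	0

static unsigned long example_flags;

static void example_mark_running(void)
{
	set_bit(EXAMPLE_RUNNING_BIT, &example_flags);	/* RMW atomic bitop */
	/*
	 * Order the bitop against later accesses: only a compiler barrier
	 * on x86, a real memory barrier on LL/SC architectures.
	 */
	smp_mb__after_atomic();
}

static bool example_is_running(void)
{
	/* Paired with smp_mb__after_atomic() in example_mark_running(). */
	smp_mb();
	return test_bit(EXAMPLE_RUNNING_BIT, &example_flags);
}

The same reasoning applies to the clear_bit() sites in the diff above: the barrier requirement is unchanged, only its cost on non-LL/SC machines goes down.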