author | Kirill Tkhai <ktkhai@virtuozzo.com> | 2018-08-27 18:29:46 +0300
---|---|---
committer | Miklos Szeredi <mszeredi@redhat.com> | 2018-09-28 16:43:22 +0200
commit | ae2dffa39485c6fd4f22321814c7287c274b473a (patch) |
tree | ae65ad5f3ac4f02fb9f7ba8b7fce89cac5d565bf /fs/fuse/dev.c |
parent | 2b30a533148af4f3865c0dcd619ad93ab3f4ba52 (diff) |
fuse: introduce fc->bg_lock
To reduce contention on fc->lock, this patch introduces bg_lock to protect
the fields related to the background queue: max_background,
congestion_threshold, num_background, active_background, bg_queue and
blocked.
This allows the next patch to make async reads no longer require fc->lock,
so async reads and writes perform better when executed in parallel.
Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
Diffstat (limited to 'fs/fuse/dev.c')
-rw-r--r-- | fs/fuse/dev.c | 20
1 file changed, 12 insertions, 8 deletions
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 6a7d3b4424e1..d4b9ffc6544d 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -287,10 +287,10 @@ void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
 		 * We get here in the unlikely case that a background
 		 * request was allocated but not sent
 		 */
-		spin_lock(&fc->lock);
+		spin_lock(&fc->bg_lock);
 		if (!fc->blocked)
 			wake_up(&fc->blocked_waitq);
-		spin_unlock(&fc->lock);
+		spin_unlock(&fc->bg_lock);
 	}
 
 	if (test_bit(FR_WAITING, &req->flags)) {
@@ -390,7 +390,7 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)
 	WARN_ON(test_bit(FR_PENDING, &req->flags));
 	WARN_ON(test_bit(FR_SENT, &req->flags));
 	if (test_bit(FR_BACKGROUND, &req->flags)) {
-		spin_lock(&fc->lock);
+		spin_lock(&fc->bg_lock);
 		clear_bit(FR_BACKGROUND, &req->flags);
 		if (fc->num_background == fc->max_background) {
 			fc->blocked = 0;
@@ -413,7 +413,7 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)
 		fc->num_background--;
 		fc->active_background--;
 		flush_bg_queue(fc);
-		spin_unlock(&fc->lock);
+		spin_unlock(&fc->bg_lock);
 	}
 	wake_up(&req->waitq);
 	if (req->end)
@@ -586,8 +586,8 @@ ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args)
  *
  * fc->connected must have been checked previously
  */
-void fuse_request_send_background_locked(struct fuse_conn *fc,
-					 struct fuse_req *req)
+void fuse_request_send_background_nocheck(struct fuse_conn *fc,
+					  struct fuse_req *req)
 {
 	BUG_ON(!test_bit(FR_BACKGROUND, &req->flags));
 	if (!test_bit(FR_WAITING, &req->flags)) {
@@ -595,6 +595,7 @@ void fuse_request_send_background_locked(struct fuse_conn *fc,
 		atomic_inc(&fc->num_waiting);
 	}
 	__set_bit(FR_ISREPLY, &req->flags);
+	spin_lock(&fc->bg_lock);
 	fc->num_background++;
 	if (fc->num_background == fc->max_background)
 		fc->blocked = 1;
@@ -604,6 +605,7 @@ void fuse_request_send_background_locked(struct fuse_conn *fc,
 	}
 	list_add_tail(&req->list, &fc->bg_queue);
 	flush_bg_queue(fc);
+	spin_unlock(&fc->bg_lock);
 }
 
 void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
@@ -611,7 +613,7 @@ void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
 	BUG_ON(!req->end);
 	spin_lock(&fc->lock);
 	if (fc->connected) {
-		fuse_request_send_background_locked(fc, req);
+		fuse_request_send_background_nocheck(fc, req);
 		spin_unlock(&fc->lock);
 	} else {
 		spin_unlock(&fc->lock);
@@ -2118,7 +2120,6 @@ void fuse_abort_conn(struct fuse_conn *fc, bool is_abort)
 		LIST_HEAD(to_end);
 
 		fc->connected = 0;
-		fc->blocked = 0;
 		fc->aborted = is_abort;
 		fuse_set_initialized(fc);
 		list_for_each_entry(fud, &fc->devices, entry) {
@@ -2140,8 +2141,11 @@ void fuse_abort_conn(struct fuse_conn *fc, bool is_abort)
 			list_splice_tail_init(&fpq->processing, &to_end);
 			spin_unlock(&fpq->lock);
 		}
+		spin_lock(&fc->bg_lock);
+		fc->blocked = 0;
 		fc->max_background = UINT_MAX;
 		flush_bg_queue(fc);
+		spin_unlock(&fc->bg_lock);
 
 		spin_lock(&fiq->waitq.lock);
 		fiq->connected = 0;
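
For readers outside the kernel tree, the sketch below shows the same locking split in a minimal user-space form. It is an illustration under stated assumptions, not the kernel implementation: pthread mutexes stand in for spinlocks, and struct conn, queue_background() and end_background() are hypothetical names that do not appear in the patch; only the fuse_conn fields named in the commit message are mirrored.

/*
 * Minimal user-space sketch of the fc->bg_lock split (illustration only).
 * Pthread mutexes replace kernel spinlocks; the struct mirrors just the
 * background-queue fields named in the commit message.
 */
#include <pthread.h>
#include <stdio.h>

struct bg_req {
	struct bg_req *next;
	int id;
};

struct conn {
	pthread_mutex_t lock;      /* main lock: foreground state, connected flag */
	pthread_mutex_t bg_lock;   /* new lock: background accounting only */
	unsigned int max_background;
	unsigned int num_background;
	unsigned int active_background;
	struct bg_req *bg_queue;   /* singly linked background queue */
	int blocked;
};

/* Queue a background request; only bg_lock is taken, so this path no
 * longer contends with code holding the main lock. */
static void queue_background(struct conn *c, struct bg_req *req)
{
	pthread_mutex_lock(&c->bg_lock);
	c->num_background++;
	if (c->num_background == c->max_background)
		c->blocked = 1;
	req->next = c->bg_queue;
	c->bg_queue = req;
	c->active_background++;    /* pretend the request was dispatched at once */
	pthread_mutex_unlock(&c->bg_lock);
}

/* Complete a background request, mirroring the request_end() hunk above. */
static void end_background(struct conn *c)
{
	pthread_mutex_lock(&c->bg_lock);
	if (c->num_background == c->max_background)
		c->blocked = 0;
	c->num_background--;
	c->active_background--;
	pthread_mutex_unlock(&c->bg_lock);
}

int main(void)
{
	struct conn c = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.bg_lock = PTHREAD_MUTEX_INITIALIZER,
		.max_background = 12,
	};
	struct bg_req r = { .id = 1 };

	queue_background(&c, &r);
	end_background(&c);
	printf("num_background=%u blocked=%d\n", c.num_background, c.blocked);
	return 0;
}

The point of the split is that paths touching only background accounting take bg_lock, so they no longer serialize against paths that hold the main lock, such as the connected check in fuse_request_send_background() in the diff above.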