author	Namjae Jeon <linkinjeon@kernel.org>	2022-07-28 23:35:18 +0900
committer	Steve French <stfrench@microsoft.com>	2022-07-31 23:14:32 -0500
commit	a14c573870a664386adc10526a6c2648ea56dae1 (patch)
tree	b7d174950764bbaa0a81d0c927ef27d73b6db7cd /fs
parent	17ea92a9f6d0b9a97aaec5ab748e4591d70a562c (diff)
ksmbd: use wait_event instead of schedule_timeout()
ksmbd threads eat masses of CPU time when a connection is disconnected. On disconnect, the ksmbd thread waits for pending requests to be processed using schedule_timeout(). schedule_timeout() is used incorrectly here: it is called without setting the task state first, so it returns immediately and the loop busy-waits. It is more efficient to use wait_event()/wake_up() than to re-check r_count on every pass with a timeout.

Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
Reviewed-by: Hyunchul Lee <hyc.lee@gmail.com>
Signed-off-by: Steve French <stfrench@microsoft.com>
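As an illustration, the following is a minimal, self-contained sketch of the pattern the patch adopts, not the ksmbd code itself; the my_conn, my_work_done() and my_conn_drain() names are hypothetical. The teardown path sleeps on a waitqueue until an atomic reference count reaches zero, and the last in-flight worker wakes it.

#include <linux/atomic.h>
#include <linux/wait.h>

struct my_conn {
	atomic_t r_count;		/* in-flight requests pinning the connection */
	wait_queue_head_t r_count_q;	/* teardown path sleeps here */
};

static void my_conn_init(struct my_conn *c)
{
	atomic_set(&c->r_count, 0);
	init_waitqueue_head(&c->r_count_q);
}

/* Worker side: drop a reference and wake the waiter on the last put. */
static void my_work_done(struct my_conn *c)
{
	/*
	 * atomic_dec_return() is a fully ordered atomic RMW, so the
	 * waitqueue_active() check cannot be reordered before the
	 * decrement and cannot miss a waiter that still sees r_count > 0.
	 */
	if (!atomic_dec_return(&c->r_count) && waitqueue_active(&c->r_count_q))
		wake_up(&c->r_count_q);
}

/* Teardown side: block, instead of polling, until all references are gone. */
static void my_conn_drain(struct my_conn *c)
{
	wait_event(c->r_count_q, atomic_read(&c->r_count) == 0);
}

wait_event() re-checks the condition after every wakeup, so a spurious wake_up() is harmless; the cost of the old approach was not correctness but the constant rescheduling.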
Diffstat (limited to 'fs')
-rw-r--r--	fs/ksmbd/connection.c	6
-rw-r--r--	fs/ksmbd/connection.h	1
-rw-r--r--	fs/ksmbd/oplock.c	35
-rw-r--r--	fs/ksmbd/server.c	8
4 files changed, 33 insertions, 17 deletions
diff --git a/fs/ksmbd/connection.c b/fs/ksmbd/connection.c
index ce23cc89046e..756ad631c019 100644
--- a/fs/ksmbd/connection.c
+++ b/fs/ksmbd/connection.c
@@ -66,6 +66,7 @@ struct ksmbd_conn *ksmbd_conn_alloc(void)
conn->outstanding_credits = 0;
init_waitqueue_head(&conn->req_running_q);
+ init_waitqueue_head(&conn->r_count_q);
INIT_LIST_HEAD(&conn->conns_list);
INIT_LIST_HEAD(&conn->requests);
INIT_LIST_HEAD(&conn->async_requests);
@@ -165,7 +166,6 @@ int ksmbd_conn_write(struct ksmbd_work *work)
struct kvec iov[3];
int iov_idx = 0;
- ksmbd_conn_try_dequeue_request(work);
if (!work->response_buf) {
pr_err("NULL response header\n");
return -EINVAL;
@@ -347,8 +347,8 @@ int ksmbd_conn_handler_loop(void *p)
out:
/* Wait till all reference dropped to the Server object*/
- while (atomic_read(&conn->r_count) > 0)
- schedule_timeout(HZ);
+ wait_event(conn->r_count_q, atomic_read(&conn->r_count) == 0);
+
unload_nls(conn->local_nls);
if (default_conn_ops.terminate_fn)
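A side note on why the removed loop burned CPU: schedule_timeout() only sleeps if the task state was set beforehand (see set_current_state()); called while the task is still TASK_RUNNING, as in the removed loop above, it merely yields and returns at once. A hedged sketch of the alternatives, illustrative and not part of the patch:

/* Removed code: no set_current_state(), so schedule_timeout() returns
 * right away and the loop re-checks r_count as fast as the thread can
 * be scheduled. */
while (atomic_read(&conn->r_count) > 0)
	schedule_timeout(HZ);

/* A correct but still polling variant would have been: */
while (atomic_read(&conn->r_count) > 0)
	schedule_timeout_interruptible(HZ);

/* The patch instead blocks until the last reference wakes the thread: */
wait_event(conn->r_count_q, atomic_read(&conn->r_count) == 0);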
diff --git a/fs/ksmbd/connection.h b/fs/ksmbd/connection.h
index 5b39f0bdeff8..2e4730457c92 100644
--- a/fs/ksmbd/connection.h
+++ b/fs/ksmbd/connection.h
@@ -65,6 +65,7 @@ struct ksmbd_conn {
unsigned int outstanding_credits;
spinlock_t credits_lock;
wait_queue_head_t req_running_q;
+ wait_queue_head_t r_count_q;
/* Lock to protect requests list*/
spinlock_t request_lock;
struct list_head requests;
diff --git a/fs/ksmbd/oplock.c b/fs/ksmbd/oplock.c
index 8b5560574d4c..3ef33ed4cdba 100644
--- a/fs/ksmbd/oplock.c
+++ b/fs/ksmbd/oplock.c
@@ -615,18 +615,13 @@ static void __smb2_oplock_break_noti(struct work_struct *wk)
struct ksmbd_file *fp;
fp = ksmbd_lookup_durable_fd(br_info->fid);
- if (!fp) {
- atomic_dec(&conn->r_count);
- ksmbd_free_work_struct(work);
- return;
- }
+ if (!fp)
+ goto out;
if (allocate_oplock_break_buf(work)) {
pr_err("smb2_allocate_rsp_buf failed! ");
- atomic_dec(&conn->r_count);
ksmbd_fd_put(work, fp);
- ksmbd_free_work_struct(work);
- return;
+ goto out;
}
rsp_hdr = smb2_get_msg(work->response_buf);
@@ -667,8 +662,16 @@ static void __smb2_oplock_break_noti(struct work_struct *wk)
ksmbd_fd_put(work, fp);
ksmbd_conn_write(work);
+
+out:
ksmbd_free_work_struct(work);
- atomic_dec(&conn->r_count);
+ /*
+ * Wake up the handler thread that waits for pending requests
+ * to drain on disconnection. waitqueue_active() is safe here
+ * because the wait condition uses an atomic operation.
+ */
+ if (!atomic_dec_return(&conn->r_count) && waitqueue_active(&conn->r_count_q))
+ wake_up(&conn->r_count_q);
}
/**
@@ -731,9 +734,7 @@ static void __smb2_lease_break_noti(struct work_struct *wk)
if (allocate_oplock_break_buf(work)) {
ksmbd_debug(OPLOCK, "smb2_allocate_rsp_buf failed! ");
- ksmbd_free_work_struct(work);
- atomic_dec(&conn->r_count);
- return;
+ goto out;
}
rsp_hdr = smb2_get_msg(work->response_buf);
@@ -771,8 +772,16 @@ static void __smb2_lease_break_noti(struct work_struct *wk)
inc_rfc1001_len(work->response_buf, 44);
ksmbd_conn_write(work);
+
+out:
ksmbd_free_work_struct(work);
- atomic_dec(&conn->r_count);
+ /*
+ * Wake up the handler thread that waits for pending requests
+ * to drain on disconnection. waitqueue_active() is safe here
+ * because the wait condition uses an atomic operation.
+ */
+ if (!atomic_dec_return(&conn->r_count) && waitqueue_active(&conn->r_count_q))
+ wake_up(&conn->r_count_q);
}
/**
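The waitqueue_active() test in the two break-notification paths above is an optimization rather than a correctness requirement: wake_up() on an empty waitqueue is already a no-op, but it still takes the waitqueue spinlock. A simpler, slightly heavier equivalent would be (illustrative only):

/* Always call wake_up() and let it find an empty queue.  Correct, but
 * this takes the r_count_q lock on every completion instead of only
 * when a disconnecting handler thread is actually waiting. */
if (!atomic_dec_return(&conn->r_count))
	wake_up(&conn->r_count_q);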
diff --git a/fs/ksmbd/server.c b/fs/ksmbd/server.c
index 4cd03d661df0..ce42bff42ef9 100644
--- a/fs/ksmbd/server.c
+++ b/fs/ksmbd/server.c
@@ -261,7 +261,13 @@ static void handle_ksmbd_work(struct work_struct *wk)
ksmbd_conn_try_dequeue_request(work);
ksmbd_free_work_struct(work);
- atomic_dec(&conn->r_count);
+ /*
+ * Wake up the handler thread that waits for pending requests
+ * to drain on disconnection. waitqueue_active() is safe here
+ * because the wait condition uses an atomic operation.
+ */
+ if (!atomic_dec_return(&conn->r_count) && waitqueue_active(&conn->r_count_q))
+ wake_up(&conn->r_count_q);
}
/**
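The diff only shows the r_count decrements; the matching increments are taken before the request or break-notification work is queued, outside this diff. A hypothetical sketch of that acquire side (my_queue_break_work() is illustrative, not ksmbd code):

static void my_queue_break_work(struct ksmbd_conn *conn,
				struct work_struct *work)
{
	/*
	 * Pin the connection before the work is queued.  This pairs with
	 * the atomic_dec_return()/wake_up() that runs when the work item
	 * finishes, so the wait_event() in ksmbd_conn_handler_loop()
	 * cannot return while a notification is still in flight.
	 */
	atomic_inc(&conn->r_count);
	queue_work(system_wq, work);	/* workqueue choice here is illustrative */
}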