ksmbd: use wait_event instead of schedule_timeout()
author     Namjae Jeon <linkinjeon@kernel.org>
           Thu, 28 Jul 2022 14:35:18 +0000 (23:35 +0900)
committer  Steve French <stfrench@microsoft.com>
           Mon, 1 Aug 2022 04:14:32 +0000 (23:14 -0500)
ksmbd threads eat masses of CPU time when a connection is
disconnected. On disconnect, the ksmbd thread waits for pending
requests to be processed using schedule_timeout(). But
schedule_timeout() is used incorrectly here: called without first
setting the task state to TASK_INTERRUPTIBLE or TASK_UNINTERRUPTIBLE,
it returns immediately instead of sleeping, so the loop busy-polls
r_count. It is more efficient to use wait_event()/wake_up() than to
repeatedly check r_count on a timeout.
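
For illustration, a minimal userspace analogue of the change
(hypothetical names, pthreads instead of kernel waitqueues): a
condition variable lets the teardown path sleep until the request
count reaches zero instead of burning CPU in a polling loop.

    #include <pthread.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t r_count_q = PTHREAD_COND_INITIALIZER;
    static int r_count;

    /* teardown path: sleep until all in-flight requests are done;
     * the counterpart of wait_event(conn->r_count_q, ...) */
    static void wait_for_requests(void)
    {
            pthread_mutex_lock(&lock);
            while (r_count > 0)
                    pthread_cond_wait(&r_count_q, &lock);
            pthread_mutex_unlock(&lock);
    }

    /* completion path: drop one reference and wake the waiter when
     * it reaches zero; the counterpart of atomic_dec_return() plus
     * wake_up() */
    static void put_request(void)
    {
            pthread_mutex_lock(&lock);
            if (--r_count == 0)
                    pthread_cond_signal(&r_count_q);
            pthread_mutex_unlock(&lock);
    }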

Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
Reviewed-by: Hyunchul Lee <hyc.lee@gmail.com>
Signed-off-by: Steve French <stfrench@microsoft.com>
fs/ksmbd/connection.c
fs/ksmbd/connection.h
fs/ksmbd/oplock.c
fs/ksmbd/server.c

diff --git a/fs/ksmbd/connection.c b/fs/ksmbd/connection.c
index ce23cc8..756ad63 100644
--- a/fs/ksmbd/connection.c
+++ b/fs/ksmbd/connection.c
@@ -66,6 +66,7 @@ struct ksmbd_conn *ksmbd_conn_alloc(void)
        conn->outstanding_credits = 0;
 
        init_waitqueue_head(&conn->req_running_q);
+       init_waitqueue_head(&conn->r_count_q);
        INIT_LIST_HEAD(&conn->conns_list);
        INIT_LIST_HEAD(&conn->requests);
        INIT_LIST_HEAD(&conn->async_requests);
@@ -165,7 +166,6 @@ int ksmbd_conn_write(struct ksmbd_work *work)
        struct kvec iov[3];
        int iov_idx = 0;
 
-       ksmbd_conn_try_dequeue_request(work);
        if (!work->response_buf) {
                pr_err("NULL response header\n");
                return -EINVAL;
@@ -347,8 +347,8 @@ int ksmbd_conn_handler_loop(void *p)
 
 out:
        /* Wait till all reference dropped to the Server object*/
-       while (atomic_read(&conn->r_count) > 0)
-               schedule_timeout(HZ);
+       wait_event(conn->r_count_q, atomic_read(&conn->r_count) == 0);
+
 
        unload_nls(conn->local_nls);
        if (default_conn_ops.terminate_fn)
diff --git a/fs/ksmbd/connection.h b/fs/ksmbd/connection.h
index 5b39f0b..2e47304 100644
--- a/fs/ksmbd/connection.h
+++ b/fs/ksmbd/connection.h
@@ -65,6 +65,7 @@ struct ksmbd_conn {
        unsigned int                    outstanding_credits;
        spinlock_t                      credits_lock;
        wait_queue_head_t               req_running_q;
+       wait_queue_head_t               r_count_q;
        /* Lock to protect requests list*/
        spinlock_t                      request_lock;
        struct list_head                requests;
diff --git a/fs/ksmbd/oplock.c b/fs/ksmbd/oplock.c
index 8b55605..3ef33ed 100644
--- a/fs/ksmbd/oplock.c
+++ b/fs/ksmbd/oplock.c
@@ -615,18 +615,13 @@ static void __smb2_oplock_break_noti(struct work_struct *wk)
        struct ksmbd_file *fp;
 
        fp = ksmbd_lookup_durable_fd(br_info->fid);
-       if (!fp) {
-               atomic_dec(&conn->r_count);
-               ksmbd_free_work_struct(work);
-               return;
-       }
+       if (!fp)
+               goto out;
 
        if (allocate_oplock_break_buf(work)) {
                pr_err("smb2_allocate_rsp_buf failed! ");
-               atomic_dec(&conn->r_count);
                ksmbd_fd_put(work, fp);
-               ksmbd_free_work_struct(work);
-               return;
+               goto out;
        }
 
        rsp_hdr = smb2_get_msg(work->response_buf);
@@ -667,8 +662,16 @@ static void __smb2_oplock_break_noti(struct work_struct *wk)
 
        ksmbd_fd_put(work, fp);
        ksmbd_conn_write(work);
+
+out:
        ksmbd_free_work_struct(work);
-       atomic_dec(&conn->r_count);
+       /*
+        * Checking the waitqueue to drop pending requests on
+        * disconnection. waitqueue_active() is safe because the
+        * wait condition is checked with an atomic operation.
+        */
+       if (!atomic_dec_return(&conn->r_count) && waitqueue_active(&conn->r_count_q))
+               wake_up(&conn->r_count_q);
 }
 
 /**
@@ -731,9 +734,7 @@ static void __smb2_lease_break_noti(struct work_struct *wk)
 
        if (allocate_oplock_break_buf(work)) {
                ksmbd_debug(OPLOCK, "smb2_allocate_rsp_buf failed! ");
-               ksmbd_free_work_struct(work);
-               atomic_dec(&conn->r_count);
-               return;
+               goto out;
        }
 
        rsp_hdr = smb2_get_msg(work->response_buf);
@@ -771,8 +772,16 @@ static void __smb2_lease_break_noti(struct work_struct *wk)
        inc_rfc1001_len(work->response_buf, 44);
 
        ksmbd_conn_write(work);
+
+out:
        ksmbd_free_work_struct(work);
-       atomic_dec(&conn->r_count);
+       /*
+        * Checking the waitqueue to drop pending requests on
+        * disconnection. waitqueue_active() is safe because the
+        * wait condition is checked with an atomic operation.
+        */
+       if (!atomic_dec_return(&conn->r_count) && waitqueue_active(&conn->r_count_q))
+               wake_up(&conn->r_count_q);
 }
 
 /**
diff --git a/fs/ksmbd/server.c b/fs/ksmbd/server.c
index 4cd03d6..ce42bff 100644
--- a/fs/ksmbd/server.c
+++ b/fs/ksmbd/server.c
@@ -261,7 +261,13 @@ static void handle_ksmbd_work(struct work_struct *wk)
 
        ksmbd_conn_try_dequeue_request(work);
        ksmbd_free_work_struct(work);
-       atomic_dec(&conn->r_count);
+       /*
+        * Checking the waitqueue to drop pending requests on
+        * disconnection. waitqueue_active() is safe because the
+        * wait condition is checked with an atomic operation.
+        */
+       if (!atomic_dec_return(&conn->r_count) && waitqueue_active(&conn->r_count_q))
+               wake_up(&conn->r_count_q);
 }
 
 /**
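
The same decrement-and-wake sequence now appears in all three
completion paths. As a hypothetical helper (not part of this commit),
the shared pattern reads:

    /* hypothetical helper, for illustration only */
    static void ksmbd_conn_r_count_dec(struct ksmbd_conn *conn)
    {
            /*
             * Only the drop of the last reference needs a wake_up():
             * a disconnecting thread may be sleeping in wait_event()
             * on r_count_q until r_count reaches zero.
             */
            if (!atomic_dec_return(&conn->r_count) &&
                waitqueue_active(&conn->r_count_q))
                    wake_up(&conn->r_count_q);
    }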