static struct ksmbd_conn_ops default_conn_ops;
LIST_HEAD(conn_list);
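+/*
+ * Sections that hold conn_list_lock for reading can now block in
+ * wait_event() (see ksmbd_conn_wait_idle()), so the non-sleeping
+ * rwlock is replaced with a rw_semaphore.
+ */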
-DEFINE_RWLOCK(conn_list_lock);
+DECLARE_RWSEM(conn_list_lock);
/**
 * ksmbd_conn_free() - free resources of the connection instance
 * @conn: connection instance to be cleaned up
 */
void ksmbd_conn_free(struct ksmbd_conn *conn)
{
- write_lock(&conn_list_lock);
+ down_write(&conn_list_lock);
list_del(&conn->conns_list);
- write_unlock(&conn_list_lock);
+ up_write(&conn_list_lock);
xa_destroy(&conn->sessions);
kvfree(conn->request_buf);
spin_lock_init(&conn->llist_lock);
INIT_LIST_HEAD(&conn->lock_list);
- write_lock(&conn_list_lock);
+ down_write(&conn_list_lock);
list_add(&conn->conns_list, &conn_list);
- write_unlock(&conn_list_lock);
+ up_write(&conn_list_lock);
return conn;
}
struct ksmbd_conn *t;
bool ret = false;
- read_lock(&conn_list_lock);
+ down_read(&conn_list_lock);
list_for_each_entry(t, &conn_list, conns_list) {
if (memcmp(t->ClientGUID, c->ClientGUID, SMB2_CLIENT_GUID_SIZE))
continue;
ret = true;
break;
}
- read_unlock(&conn_list_lock);
+ up_read(&conn_list_lock);
return ret;
}
mutex_unlock(&conn->srv_mutex);
}
-void ksmbd_conn_wait_idle(struct ksmbd_conn *conn)
+void ksmbd_all_conn_set_status(u64 sess_id, u32 status)
{
+ struct ksmbd_conn *conn;
+
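+ /*
+ * Propagate @status to every connection that serves the session
+ * @sess_id. Connections flagged as binding are included as well,
+ * since the session may not be in their conn->sessions yet.
+ */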
+ down_read(&conn_list_lock);
+ list_for_each_entry(conn, &conn_list, conns_list) {
+ if (conn->binding || xa_load(&conn->sessions, sess_id))
+ WRITE_ONCE(conn->status, status);
+ }
+ up_read(&conn_list_lock);
+}
+
+void ksmbd_conn_wait_idle(struct ksmbd_conn *conn, u64 sess_id)
+{
+ struct ksmbd_conn *bind_conn;
+
wait_event(conn->req_running_q, atomic_read(&conn->req_running) < 2);
+
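+ /*
+ * Also wait until every other channel of the session has drained
+ * its in-flight requests, skipping connections that are already
+ * being released.
+ */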
+ down_read(&conn_list_lock);
+ list_for_each_entry(bind_conn, &conn_list, conns_list) {
+ if (bind_conn == conn)
+ continue;
+
+ if ((bind_conn->binding || xa_load(&bind_conn->sessions, sess_id)) &&
+ !ksmbd_conn_releasing(bind_conn) &&
+ atomic_read(&bind_conn->req_running)) {
+ wait_event(bind_conn->req_running_q,
+ atomic_read(&bind_conn->req_running) == 0);
+ }
+ }
+ up_read(&conn_list_lock);
}
int ksmbd_conn_write(struct ksmbd_work *work)
}
out:
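+ /*
+ * Mark the connection as releasing so that waiters such as
+ * ksmbd_conn_wait_idle() skip it from now on.
+ */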
+ ksmbd_conn_set_releasing(conn);
/* Wait till all references to the server object are dropped */
wait_event(conn->r_count_q, atomic_read(&conn->r_count) == 0);
-
if (IS_ENABLED(CONFIG_UNICODE))
utf8_unload(conn->um);
unload_nls(conn->local_nls);
struct ksmbd_transport *t;
again:
- read_lock(&conn_list_lock);
+ down_read(&conn_list_lock);
list_for_each_entry(conn, &conn_list, conns_list) {
struct task_struct *task;
task->comm, task_pid_nr(task));
ksmbd_conn_set_exiting(conn);
if (t->ops->shutdown) {
- read_unlock(&conn_list_lock);
+ up_read(&conn_list_lock);
t->ops->shutdown(t);
- read_lock(&conn_list_lock);
+ down_read(&conn_list_lock);
}
}
- read_unlock(&conn_list_lock);
+ up_read(&conn_list_lock);
if (!list_empty(&conn_list)) {
schedule_timeout_interruptible(HZ / 10); /* 100ms */
KSMBD_SESS_GOOD,
KSMBD_SESS_EXITING,
KSMBD_SESS_NEED_RECONNECT,
- KSMBD_SESS_NEED_NEGOTIATE
+ KSMBD_SESS_NEED_NEGOTIATE,
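+ /* connection is being torn down; lookups and waiters skip it */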
+ KSMBD_SESS_RELEASING
};
struct ksmbd_stats {
#define KSMBD_TCP_PEER_SOCKADDR(c) ((struct sockaddr *)&((c)->peer_addr))
extern struct list_head conn_list;
-extern rwlock_t conn_list_lock;
+extern struct rw_semaphore conn_list_lock;
bool ksmbd_conn_alive(struct ksmbd_conn *conn);
-void ksmbd_conn_wait_idle(struct ksmbd_conn *conn);
+void ksmbd_conn_wait_idle(struct ksmbd_conn *conn, u64 sess_id);
struct ksmbd_conn *ksmbd_conn_alloc(void);
void ksmbd_conn_free(struct ksmbd_conn *conn);
bool ksmbd_conn_lookup_dialect(struct ksmbd_conn *c);
return READ_ONCE(conn->status) == KSMBD_SESS_EXITING;
}
+static inline bool ksmbd_conn_releasing(struct ksmbd_conn *conn)
+{
+ return READ_ONCE(conn->status) == KSMBD_SESS_RELEASING;
+}
+
static inline void ksmbd_conn_set_new(struct ksmbd_conn *conn)
{
WRITE_ONCE(conn->status, KSMBD_SESS_NEW);
{
WRITE_ONCE(conn->status, KSMBD_SESS_EXITING);
}
+
+static inline void ksmbd_conn_set_releasing(struct ksmbd_conn *conn)
+{
+ WRITE_ONCE(conn->status, KSMBD_SESS_RELEASING);
+}
+
+void ksmbd_all_conn_set_status(u64 sess_id, u32 status);
#endif /* __CONNECTION_H__ */
struct ksmbd_tree_connect *tc;
unsigned long id;
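+ /*
+ * sess may be NULL here: smb2_session_logoff() looks the session up
+ * again after waiting, and it may already have been destroyed.
+ */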
+ if (!sess)
+ return -EINVAL;
+
xa_for_each(&sess->tree_conns, id, tc)
ret |= ksmbd_tree_conn_disconnect(sess, tc);
xa_destroy(&sess->tree_conns);
if (!sess)
return;
- down_write(&sessions_table_lock);
- hash_del(&sess->hlist);
- up_write(&sessions_table_lock);
-
if (sess->user)
ksmbd_free_user(sess->user);
unsigned long id;
struct ksmbd_session *sess;
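+ /*
+ * Expired sessions are removed from both conn->sessions and the
+ * global sessions_table, so hold the table lock across the scan.
+ */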
+ down_write(&sessions_table_lock);
xa_for_each(&conn->sessions, id, sess) {
if (sess->state != SMB2_SESSION_VALID ||
time_after(jiffies,
sess->last_active + SMB2_SESSION_TIMEOUT)) {
xa_erase(&conn->sessions, sess->id);
+ hash_del(&sess->hlist);
ksmbd_session_destroy(sess);
continue;
}
}
+ up_write(&sessions_table_lock);
}
int ksmbd_session_register(struct ksmbd_conn *conn,
return xa_err(xa_store(&conn->sessions, sess->id, sess, GFP_KERNEL));
}
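+/*
+ * Drop the channel bound to @conn from @sess. Returns 0 when a
+ * channel was found and freed, -ENOENT otherwise, so callers can
+ * tell whether @sess was reached over @conn at all.
+ */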
-static void ksmbd_chann_del(struct ksmbd_conn *conn, struct ksmbd_session *sess)
+static int ksmbd_chann_del(struct ksmbd_conn *conn, struct ksmbd_session *sess)
{
struct channel *chann;
chann = xa_erase(&sess->ksmbd_chann_list, (long)conn);
if (!chann)
- return;
+ return -ENOENT;
kfree(chann);
+ return 0;
}
void ksmbd_sessions_deregister(struct ksmbd_conn *conn)
struct ksmbd_session *sess;
unsigned long id;
+ down_write(&sessions_table_lock);
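+ /*
+ * For a binding connection the session is owned by another
+ * connection, so find the channels it opened through the global
+ * sessions_table instead of conn->sessions.
+ */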
+ if (conn->binding) {
+ int bkt;
+ struct hlist_node *tmp;
+
+ hash_for_each_safe(sessions_table, bkt, tmp, sess, hlist) {
+ if (!ksmbd_chann_del(conn, sess) &&
+ xa_empty(&sess->ksmbd_chann_list)) {
+ hash_del(&sess->hlist);
+ ksmbd_session_destroy(sess);
+ }
+ }
+ }
+
xa_for_each(&conn->sessions, id, sess) {
+ unsigned long chann_id;
+ struct channel *chann;
+
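+ /* force every other channel of this session to exit as well */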
+ xa_for_each(&sess->ksmbd_chann_list, chann_id, chann) {
+ if (chann->conn != conn)
+ ksmbd_conn_set_exiting(chann->conn);
+ }
+
ksmbd_chann_del(conn, sess);
if (xa_empty(&sess->ksmbd_chann_list)) {
xa_erase(&conn->sessions, sess->id);
+ hash_del(&sess->hlist);
ksmbd_session_destroy(sess);
}
}
+ up_write(&sessions_table_lock);
}
struct ksmbd_session *ksmbd_session_lookup(struct ksmbd_conn *conn,
struct smb2_logoff_rsp *rsp = smb2_get_msg(work->response_buf);
struct ksmbd_session *sess;
struct smb2_logoff_req *req = smb2_get_msg(work->request_buf);
+ u64 sess_id = le64_to_cpu(req->hdr.SessionId);
rsp->StructureSize = cpu_to_le16(4);
inc_rfc1001_len(work->response_buf, 4);
ksmbd_debug(SMB, "request\n");
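+ /*
+ * Move every connection of this session to NEED_RECONNECT, not just
+ * the current one, so other channels stop servicing requests for it
+ * while the logoff is being processed.
+ */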
- ksmbd_conn_set_need_reconnect(conn);
+ ksmbd_all_conn_set_status(sess_id, KSMBD_SESS_NEED_RECONNECT);
ksmbd_close_session_fds(work);
- ksmbd_conn_wait_idle(conn);
+ ksmbd_conn_wait_idle(conn, sess_id);
/*
 * Re-lookup the session to check whether it was deleted while
 * waiting for the outstanding requests to complete
 */
- sess = ksmbd_session_lookup(conn, le64_to_cpu(req->hdr.SessionId));
+ sess = ksmbd_session_lookup_all(conn, sess_id);
if (ksmbd_tree_conn_session_logoff(sess)) {
ksmbd_debug(SMB, "Invalid tid %d\n", req->hdr.Id.SyncId.TreeId);
rsp->hdr.Status = STATUS_NETWORK_NAME_DELETED;
ksmbd_free_user(sess->user);
sess->user = NULL;
- ksmbd_conn_set_need_negotiate(conn);
+ ksmbd_all_conn_set_status(sess_id, KSMBD_SESS_NEED_NEGOTIATE);
return 0;
}
nolock = 1;
/* check locks in connection list */
- read_lock(&conn_list_lock);
+ down_read(&conn_list_lock);
list_for_each_entry(conn, &conn_list, conns_list) {
spin_lock(&conn->llist_lock);
list_for_each_entry_safe(cmp_lock, tmp2, &conn->lock_list, clist) {
list_del(&cmp_lock->flist);
list_del(&cmp_lock->clist);
spin_unlock(&conn->llist_lock);
- read_unlock(&conn_list_lock);
+ up_read(&conn_list_lock);
locks_free_lock(cmp_lock->fl);
kfree(cmp_lock);
cmp_lock->start > smb_lock->start &&
cmp_lock->start < smb_lock->end) {
spin_unlock(&conn->llist_lock);
- read_unlock(&conn_list_lock);
+ up_read(&conn_list_lock);
pr_err("previous lock conflict with zero byte lock range\n");
goto out;
}
smb_lock->start > cmp_lock->start &&
smb_lock->start < cmp_lock->end) {
spin_unlock(&conn->llist_lock);
- read_unlock(&conn_list_lock);
+ up_read(&conn_list_lock);
pr_err("current lock conflict with zero byte lock range\n");
goto out;
}
cmp_lock->end >= smb_lock->end)) &&
!cmp_lock->zero_len && !smb_lock->zero_len) {
spin_unlock(&conn->llist_lock);
- read_unlock(&conn_list_lock);
+ up_read(&conn_list_lock);
pr_err("Not allow lock operation on exclusive lock range\n");
goto out;
}
}
spin_unlock(&conn->llist_lock);
}
- read_unlock(&conn_list_lock);
+ up_read(&conn_list_lock);
out_check_cl:
if (smb_lock->fl->fl_type == F_UNLCK && nolock) {
pr_err("Try to unlock nolocked range\n");