cifs: fix potential deadlock in cache_refresh_path()
author Paulo Alcantara <pc@cjr.nz>
Tue, 17 Jan 2023 22:00:37 +0000 (19:00 -0300)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 1 Feb 2023 07:27:13 +0000 (08:27 +0100)
[ Upstream commit 9fb0db40513e27537fde63287aea920b60557a69 ]

Avoid getting DFS referral from an exclusive lock in
cache_refresh_path() because the tcon IPC used for getting the
referral could be disconnected and thus causing a deadlock as shown
below:

task A                       task B
======                       ======
cifs_demultiplex_thread()    dfs_cache_find()
 cifs_handle_standard()       cache_refresh_path()
  reconnect_dfs_server()       down_write()
   dfs_cache_noreq_find()       get_dfs_referral()
    down_read() <- deadlock      smb2_get_dfs_refer()
                                  SMB2_ioctl()
                                   cifs_send_recv()
                                    compound_send_recv()
                                     wait_for_response()

where task A cannot wake up task B because task A is blocked on
down_read() by the exclusive lock held in cache_refresh_path(), while
task B waits in wait_for_response() for a reply that only task A (the
demultiplex thread) can process, so neither task can make progress.
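
Fix this by only holding the shared lock for the cache lookup, dropping
it before issuing the referral request, and taking the exclusive lock
only once the reply is in hand, re-checking the entry since another
task may have added or refreshed it in the meantime.  A condensed
sketch of the new flow in cache_refresh_path() (error handling and
debug output trimmed, see the full patch below):

    down_read(&htable_rw_lock);
    ce = lookup_cache_entry(path);
    if (!IS_ERR(ce) && !cache_entry_expired(ce)) {
            up_read(&htable_rw_lock);
            return 0;
    }
    /* drop all locks before the ioctl -- @ses may be reconnecting */
    up_read(&htable_rw_lock);

    rc = get_dfs_referral(xid, ses, path, &refs, &numrefs);
    if (rc)
            goto out;

    down_write(&htable_rw_lock);
    /* re-check: another task may have added or refreshed the entry */
    ce = lookup_cache_entry(path);
    if (!IS_ERR(ce)) {
            if (cache_entry_expired(ce))
                    rc = update_cache_entry_locked(ce, refs, numrefs);
    } else {
            rc = add_cache_entry_locked(refs, numrefs);
    }
    up_write(&htable_rw_lock);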

Fixes: c9f711039905 ("cifs: keep referral server sessions alive")
Reviewed-by: Aurélien Aptel <aurelien.aptel@gmail.com>
Signed-off-by: Paulo Alcantara (SUSE) <pc@cjr.nz>
Signed-off-by: Steve French <stfrench@microsoft.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
fs/cifs/dfs_cache.c

index 1f3efa7..8c98fa5 100644
@@ -792,26 +792,27 @@ static int get_dfs_referral(const unsigned int xid, struct cifs_ses *ses, const
  */
 static int cache_refresh_path(const unsigned int xid, struct cifs_ses *ses, const char *path)
 {
-       int rc;
-       struct cache_entry *ce;
        struct dfs_info3_param *refs = NULL;
+       struct cache_entry *ce;
        int numrefs = 0;
-       bool newent = false;
+       int rc;
 
        cifs_dbg(FYI, "%s: search path: %s\n", __func__, path);
 
-       down_write(&htable_rw_lock);
+       down_read(&htable_rw_lock);
 
        ce = lookup_cache_entry(path);
-       if (!IS_ERR(ce)) {
-               if (!cache_entry_expired(ce)) {
-                       dump_ce(ce);
-                       up_write(&htable_rw_lock);
-                       return 0;
-               }
-       } else {
-               newent = true;
+       if (!IS_ERR(ce) && !cache_entry_expired(ce)) {
+               up_read(&htable_rw_lock);
+               return 0;
        }
+       /*
+        * Unlock shared access as we don't want to hold any locks while getting
+        * a new referral.  The @ses used for performing the I/O could be
+        * reconnecting and it acquires @htable_rw_lock to look up the dfs cache
+        * in order to failover -- if necessary.
+        */
+       up_read(&htable_rw_lock);
 
        /*
         * Either the entry was not found, or it is expired.
@@ -819,19 +820,22 @@ static int cache_refresh_path(const unsigned int xid, struct cifs_ses *ses, cons
         */
        rc = get_dfs_referral(xid, ses, path, &refs, &numrefs);
        if (rc)
-               goto out_unlock;
+               goto out;
 
        dump_refs(refs, numrefs);
 
-       if (!newent) {
-               rc = update_cache_entry_locked(ce, refs, numrefs);
-               goto out_unlock;
+       down_write(&htable_rw_lock);
+       /* Re-check as another task might have it added or refreshed already */
+       ce = lookup_cache_entry(path);
+       if (!IS_ERR(ce)) {
+               if (cache_entry_expired(ce))
+                       rc = update_cache_entry_locked(ce, refs, numrefs);
+       } else {
+               rc = add_cache_entry_locked(refs, numrefs);
        }
 
-       rc = add_cache_entry_locked(refs, numrefs);
-
-out_unlock:
        up_write(&htable_rw_lock);
+out:
        free_dfs_info_array(refs, numrefs);
        return rc;
 }