// SPDX-License-Identifier: LGPL-2.1
/*
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2008
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *
 */

#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/mempool.h>
#include <linux/vmalloc.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smberr.h"
#include "nterr.h"
#include "cifs_unicode.h"
#include "smb2pdu.h"
#include "cifsfs.h"
#ifdef CONFIG_CIFS_DFS_UPCALL
#include "dns_resolve.h"
#include "dfs_cache.h"
#include "dfs.h"
#endif
#include "fs_context.h"
#include "cached_dir.h"

extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;

/* The xid serves as a useful identifier for each incoming vfs request,
   in a similar way to the mid which is useful to track each sent smb,
   and CurrentXid can also provide a running counter (although it
   will eventually wrap past zero) of the total vfs operations handled
   since the cifs fs was mounted */

unsigned int
_get_xid(void)
{
	unsigned int xid;

	spin_lock(&GlobalMid_Lock);
	GlobalTotalActiveXid++;

	/* keep high water mark for number of simultaneous ops in filesystem */
	if (GlobalTotalActiveXid > GlobalMaxActiveXid)
		GlobalMaxActiveXid = GlobalTotalActiveXid;
	if (GlobalTotalActiveXid > 65000)
		cifs_dbg(FYI, "warning: more than 65000 requests active\n");
	xid = GlobalCurrentXid++;
	spin_unlock(&GlobalMid_Lock);
	return xid;
}

void
_free_xid(unsigned int xid)
{
	spin_lock(&GlobalMid_Lock);
	/* if (GlobalTotalActiveXid == 0)
		BUG(); */
	GlobalTotalActiveXid--;
	spin_unlock(&GlobalMid_Lock);
}

struct cifs_ses *
sesInfoAlloc(void)
{
	struct cifs_ses *ret_buf;

	ret_buf = kzalloc(sizeof(struct cifs_ses), GFP_KERNEL);
	if (ret_buf) {
		atomic_inc(&sesInfoAllocCount);
		spin_lock_init(&ret_buf->ses_lock);
		ret_buf->ses_status = SES_NEW;
		++ret_buf->ses_count;
		INIT_LIST_HEAD(&ret_buf->smb_ses_list);
		INIT_LIST_HEAD(&ret_buf->tcon_list);
		mutex_init(&ret_buf->session_mutex);
		spin_lock_init(&ret_buf->iface_lock);
		INIT_LIST_HEAD(&ret_buf->iface_list);
		spin_lock_init(&ret_buf->chan_lock);
	}
	return ret_buf;
}

void
sesInfoFree(struct cifs_ses *buf_to_free)
{
	struct cifs_server_iface *iface = NULL, *niface = NULL;

	if (buf_to_free == NULL) {
		cifs_dbg(FYI, "Null buffer passed to sesInfoFree\n");
		return;
	}

	atomic_dec(&sesInfoAllocCount);
	kfree(buf_to_free->serverOS);
	kfree(buf_to_free->serverDomain);
	kfree(buf_to_free->serverNOS);
	kfree_sensitive(buf_to_free->password);
	kfree(buf_to_free->user_name);
	kfree(buf_to_free->domainName);
	kfree_sensitive(buf_to_free->auth_key.response);
	spin_lock(&buf_to_free->iface_lock);
	list_for_each_entry_safe(iface, niface, &buf_to_free->iface_list,
				 iface_head)
		kref_put(&iface->refcount, release_iface);
	spin_unlock(&buf_to_free->iface_lock);
	kfree_sensitive(buf_to_free);
}

struct cifs_tcon *
tconInfoAlloc(void)
{
	struct cifs_tcon *ret_buf;

	ret_buf = kzalloc(sizeof(*ret_buf), GFP_KERNEL);
	if (!ret_buf)
		return NULL;
	ret_buf->cfids = init_cached_dirs();
	if (!ret_buf->cfids) {
		kfree(ret_buf);
		return NULL;
	}

	atomic_inc(&tconInfoAllocCount);
	ret_buf->status = TID_NEW;
	++ret_buf->tc_count;
	spin_lock_init(&ret_buf->tc_lock);
	INIT_LIST_HEAD(&ret_buf->openFileList);
	INIT_LIST_HEAD(&ret_buf->tcon_list);
	spin_lock_init(&ret_buf->open_file_lock);
	spin_lock_init(&ret_buf->stat_lock);
	atomic_set(&ret_buf->num_local_opens, 0);
	atomic_set(&ret_buf->num_remote_opens, 0);
#ifdef CONFIG_CIFS_DFS_UPCALL
	INIT_LIST_HEAD(&ret_buf->dfs_ses_list);
#endif

	return ret_buf;
}

void
tconInfoFree(struct cifs_tcon *tcon)
{
	if (tcon == NULL) {
		cifs_dbg(FYI, "Null buffer passed to tconInfoFree\n");
		return;
	}
	free_cached_dirs(tcon->cfids);
	atomic_dec(&tconInfoAllocCount);
	kfree(tcon->nativeFileSystem);
	kfree_sensitive(tcon->password);
#ifdef CONFIG_CIFS_DFS_UPCALL
	dfs_put_root_smb_sessions(&tcon->dfs_ses_list);
#endif
	kfree(tcon->origin_fullpath);
	kfree(tcon);
}

struct smb_hdr *
cifs_buf_get(void)
{
	struct smb_hdr *ret_buf = NULL;
	/*
	 * SMB2 header is bigger than CIFS one - no problems to clean some
	 * more bytes for CIFS.
	 */
	size_t buf_size = sizeof(struct smb2_hdr);

	/*
	 * We could use negotiated size instead of max_msgsize -
	 * but it may be more efficient to always alloc same size
	 * albeit slightly larger than necessary and maxbuffersize
	 * defaults to this and can not be bigger.
	 */
	ret_buf = mempool_alloc(cifs_req_poolp, GFP_NOFS);

	/* clear the first few header bytes */
	/* for most paths, more is cleared in header_assemble */
	memset(ret_buf, 0, buf_size + 3);
	atomic_inc(&buf_alloc_count);
#ifdef CONFIG_CIFS_STATS2
	atomic_inc(&total_buf_alloc_count);
#endif /* CONFIG_CIFS_STATS2 */

	return ret_buf;
}

void
cifs_buf_release(void *buf_to_free)
{
	if (buf_to_free == NULL) {
		/* cifs_dbg(FYI, "Null buffer passed to cifs_buf_release\n");*/
		return;
	}
	mempool_free(buf_to_free, cifs_req_poolp);

	atomic_dec(&buf_alloc_count);
	return;
}

struct smb_hdr *
cifs_small_buf_get(void)
{
	struct smb_hdr *ret_buf = NULL;

/* We could use negotiated size instead of max_msgsize -
   but it may be more efficient to always alloc same size
   albeit slightly larger than necessary and maxbuffersize
   defaults to this and can not be bigger */
	ret_buf = mempool_alloc(cifs_sm_req_poolp, GFP_NOFS);
	/* No need to clear memory here, cleared in header assemble */
	/*	memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
	atomic_inc(&small_buf_alloc_count);
#ifdef CONFIG_CIFS_STATS2
	atomic_inc(&total_small_buf_alloc_count);
#endif /* CONFIG_CIFS_STATS2 */

	return ret_buf;
}

void
cifs_small_buf_release(void *buf_to_free)
{

	if (buf_to_free == NULL) {
		cifs_dbg(FYI, "Null buffer passed to cifs_small_buf_release\n");
		return;
	}
	mempool_free(buf_to_free, cifs_sm_req_poolp);

	atomic_dec(&small_buf_alloc_count);
	return;
}

void
free_rsp_buf(int resp_buftype, void *rsp)
{
	if (resp_buftype == CIFS_SMALL_BUFFER)
		cifs_small_buf_release(rsp);
	else if (resp_buftype == CIFS_LARGE_BUFFER)
		cifs_buf_release(rsp);
}
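
/*
 * Illustrative usage sketch (not part of the original file): callers
 * track which pool a response came from and hand both back to
 * free_rsp_buf(), roughly:
 *
 *	int resp_buftype = CIFS_NO_BUFFER;
 *	struct kvec rsp_iov = { NULL, 0 };
 *
 *	rc = <send request, filling rsp_iov and resp_buftype>;
 *	...
 *	free_rsp_buf(resp_buftype, rsp_iov.iov_base);
 */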

/* NB: MID can not be set if treeCon not passed in, in that
   case it is the responsibility of the caller to set the mid */
void
header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
		const struct cifs_tcon *treeCon, int word_count
		/* length of fixed section (word count) in two byte units  */)
{
	char *temp = (char *) buffer;

	memset(temp, 0, 256); /* bigger than MAX_CIFS_HDR_SIZE */

	buffer->smb_buf_length = cpu_to_be32(
	    (2 * word_count) + sizeof(struct smb_hdr) -
	    4 /*  RFC 1001 length field does not count */  +
	    2 /* for bcc field itself */) ;

	buffer->Protocol[0] = 0xFF;
	buffer->Protocol[1] = 'S';
	buffer->Protocol[2] = 'M';
	buffer->Protocol[3] = 'B';
	buffer->Command = smb_command;
	buffer->Flags = 0x00;	/* case sensitive */
	buffer->Flags2 = SMBFLG2_KNOWS_LONG_NAMES;
	buffer->Pid = cpu_to_le16((__u16)current->tgid);
	buffer->PidHigh = cpu_to_le16((__u16)(current->tgid >> 16));
	if (treeCon) {
		buffer->Tid = treeCon->tid;
		if (treeCon->ses) {
			if (treeCon->ses->capabilities & CAP_UNICODE)
				buffer->Flags2 |= SMBFLG2_UNICODE;
			if (treeCon->ses->capabilities & CAP_STATUS32)
				buffer->Flags2 |= SMBFLG2_ERR_STATUS;

			/* Uid is not converted */
			buffer->Uid = treeCon->ses->Suid;
			if (treeCon->ses->server)
				buffer->Mid = get_next_mid(treeCon->ses->server);
		}
		if (treeCon->Flags & SMB_SHARE_IS_IN_DFS)
			buffer->Flags2 |= SMBFLG2_DFS;
		if (treeCon->nocase)
			buffer->Flags  |= SMBFLG_CASELESS;
		if ((treeCon->ses) && (treeCon->ses->server))
			if (treeCon->ses->server->sign)
				buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
	}

/*  endian conversion of flags is now done just before sending */
	buffer->WordCount = (char) word_count;
	return;
}
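
/*
 * Illustrative sketch (not part of the original file): a legacy SMB1
 * request path typically grabs a buffer and stamps the header before
 * filling in the command-specific words, roughly:
 *
 *	struct smb_hdr *buf = cifs_small_buf_get();
 *
 *	header_assemble(buf, SMB_COM_ECHO, tcon, wct);
 *	<fill in the wct parameter words and the byte count>
 */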

static int
check_smb_hdr(struct smb_hdr *smb)
{
	/* does it have the right SMB "signature" ? */
	if (*(__le32 *) smb->Protocol != cpu_to_le32(0x424d53ff)) {
		cifs_dbg(VFS, "Bad protocol string signature header 0x%x\n",
			 *(unsigned int *)smb->Protocol);
		return 1;
	}

	/* if it's a response then accept */
	if (smb->Flags & SMBFLG_RESPONSE)
		return 0;

	/* only one valid case where server sends us request */
	if (smb->Command == SMB_COM_LOCKING_ANDX)
		return 0;

	cifs_dbg(VFS, "Server sent request, not response. mid=%u\n",
		 get_mid(smb));
	return 1;
}

int
checkSMB(char *buf, unsigned int total_read, struct TCP_Server_Info *server)
{
	struct smb_hdr *smb = (struct smb_hdr *)buf;
	__u32 rfclen = be32_to_cpu(smb->smb_buf_length);
	__u32 clc_len;  /* calculated length */
	cifs_dbg(FYI, "checkSMB Length: 0x%x, smb_buf_length: 0x%x\n",
		 total_read, rfclen);

	/* is this frame too small to even get to a BCC? */
	if (total_read < 2 + sizeof(struct smb_hdr)) {
		if ((total_read >= sizeof(struct smb_hdr) - 1)
			    && (smb->Status.CifsError != 0)) {
			/* it's an error return */
			smb->WordCount = 0;
			/* some error cases do not return wct and bcc */
			return 0;
		} else if ((total_read == sizeof(struct smb_hdr) + 1) &&
				(smb->WordCount == 0)) {
			char *tmp = (char *)smb;
			/* Need to work around a bug in two servers here */
			/* First, check if the part of bcc they sent was zero */
			if (tmp[sizeof(struct smb_hdr)] == 0) {
				/* some servers return only half of bcc
				 * on simple responses (wct, bcc both zero)
				 * in particular have seen this on
				 * ulogoffX and FindClose. This leaves
				 * one byte of bcc potentially uninitialized
				 */
				/* zero rest of bcc */
				tmp[sizeof(struct smb_hdr)+1] = 0;
				return 0;
			}
			cifs_dbg(VFS, "rcvd invalid byte count (bcc)\n");
		} else {
			cifs_dbg(VFS, "Length less than smb header size\n");
		}
		return -EIO;
	}

	/* otherwise, there is enough to get to the BCC */
	if (check_smb_hdr(smb))
		return -EIO;
	clc_len = smbCalcSize(smb);

	if (4 + rfclen != total_read) {
		cifs_dbg(VFS, "Length read does not match RFC1001 length %d\n",
			 rfclen);
		return -EIO;
	}

	if (4 + rfclen != clc_len) {
		__u16 mid = get_mid(smb);
		/* check if bcc wrapped around for large read responses */
		if ((rfclen > 64 * 1024) && (rfclen > clc_len)) {
			/* check if lengths match mod 64K */
			if (((4 + rfclen) & 0xFFFF) == (clc_len & 0xFFFF))
				return 0; /* bcc wrapped */
		}
		cifs_dbg(FYI, "Calculated size %u vs length %u mismatch for mid=%u\n",
			 clc_len, 4 + rfclen, mid);

		if (4 + rfclen < clc_len) {
			cifs_dbg(VFS, "RFC1001 size %u smaller than SMB for mid=%u\n",
				 rfclen, mid);
			return -EIO;
		} else if (rfclen > clc_len + 512) {
			/*
			 * Some servers (Windows XP in particular) send more
			 * data than the lengths in the SMB packet would
			 * indicate on certain calls (byte range locks and
			 * trans2 find first calls in particular). While the
			 * client can handle such a frame by ignoring the
			 * trailing data, we choose to limit the amount of
			 * extra data to 512 bytes.
			 */
			cifs_dbg(VFS, "RFC1001 size %u more than 512 bytes larger than SMB for mid=%u\n",
				 rfclen, mid);
			return -EIO;
		}
	}
	return 0;
}
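
/*
 * Worked example (illustrative, not part of the original file): for a
 * 100-byte frame read off the wire, total_read == 100 and the RFC 1001
 * header encodes rfclen == 96, so "4 + rfclen == total_read" holds;
 * clc_len, recomputed from WordCount and ByteCount by smbCalcSize(),
 * must then also come to 100 (modulo the 64K wrap special case for
 * large reads above).
 */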

bool
is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
{
	struct smb_hdr *buf = (struct smb_hdr *)buffer;
	struct smb_com_lock_req *pSMB = (struct smb_com_lock_req *)buf;
	struct TCP_Server_Info *pserver;
	struct cifs_ses *ses;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *pCifsInode;
	struct cifsFileInfo *netfile;

	cifs_dbg(FYI, "Checking for oplock break or dnotify response\n");
	if ((pSMB->hdr.Command == SMB_COM_NT_TRANSACT) &&
	   (pSMB->hdr.Flags & SMBFLG_RESPONSE)) {
		struct smb_com_transaction_change_notify_rsp *pSMBr =
			(struct smb_com_transaction_change_notify_rsp *)buf;
		struct file_notify_information *pnotify;
		__u32 data_offset = 0;
		size_t len = srv->total_read - sizeof(pSMBr->hdr.smb_buf_length);

		if (get_bcc(buf) > sizeof(struct file_notify_information)) {
			data_offset = le32_to_cpu(pSMBr->DataOffset);

			if (data_offset >
			    len - sizeof(struct file_notify_information)) {
				cifs_dbg(FYI, "Invalid data_offset %u\n",
					 data_offset);
				return true;
			}
			pnotify = (struct file_notify_information *)
				((char *)&pSMBr->hdr.Protocol + data_offset);
			cifs_dbg(FYI, "dnotify on %s Action: 0x%x\n",
				 pnotify->FileName, pnotify->Action);
			/*   cifs_dump_mem("Rcvd notify Data: ",buf,
				sizeof(struct smb_hdr)+60); */
			return true;
		}
		if (pSMBr->hdr.Status.CifsError) {
			cifs_dbg(FYI, "notify err 0x%x\n",
				 pSMBr->hdr.Status.CifsError);
			return true;
		}
		return false;
	}
	if (pSMB->hdr.Command != SMB_COM_LOCKING_ANDX)
		return false;
	if (pSMB->hdr.Flags & SMBFLG_RESPONSE) {
		/* no sense logging error on invalid handle on oplock
		   break - a harmless race between a close request and an
		   oplock break response is expected from time to time when
		   writing out large dirty files cached on the client */
		if ((NT_STATUS_INVALID_HANDLE) ==
		   le32_to_cpu(pSMB->hdr.Status.CifsError)) {
			cifs_dbg(FYI, "Invalid handle on oplock break\n");
			return true;
		} else if (ERRbadfid ==
		   le16_to_cpu(pSMB->hdr.Status.DosError.Error)) {
			return true;
		} else {
			return false; /* on valid oplock brk we get "request" */
		}
	}
	if (pSMB->hdr.WordCount != 8)
		return false;

	cifs_dbg(FYI, "oplock type 0x%x level 0x%x\n",
		 pSMB->LockType, pSMB->OplockLevel);
	if (!(pSMB->LockType & LOCKING_ANDX_OPLOCK_RELEASE))
		return false;

	/* If server is a channel, select the primary channel */
	pserver = CIFS_SERVER_IS_CHAN(srv) ? srv->primary_server : srv;

	/* look up tcon based on tid & uid */
	spin_lock(&cifs_tcp_ses_lock);
	list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
		list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
			if (tcon->tid != buf->Tid)
				continue;

			cifs_stats_inc(&tcon->stats.cifs_stats.num_oplock_brks);
			spin_lock(&tcon->open_file_lock);
			list_for_each_entry(netfile, &tcon->openFileList, tlist) {
				if (pSMB->Fid != netfile->fid.netfid)
					continue;

				cifs_dbg(FYI, "file id match, oplock break\n");
				pCifsInode = CIFS_I(d_inode(netfile->dentry));

				set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK,
					&pCifsInode->flags);

				netfile->oplock_epoch = 0;
				netfile->oplock_level = pSMB->OplockLevel;
				netfile->oplock_break_cancelled = false;
				cifs_queue_oplock_break(netfile);

				spin_unlock(&tcon->open_file_lock);
				spin_unlock(&cifs_tcp_ses_lock);
				return true;
			}
			spin_unlock(&tcon->open_file_lock);
			spin_unlock(&cifs_tcp_ses_lock);
			cifs_dbg(FYI, "No matching file for oplock break\n");
			return true;
		}
	}
	spin_unlock(&cifs_tcp_ses_lock);
	cifs_dbg(FYI, "Can not process oplock break for non-existent connection\n");
	return true;
}

void
dump_smb(void *buf, int smb_buf_length)
{
	if (traceSMB == 0)
		return;

	print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE, 8, 2, buf,
		       smb_buf_length, true);
}

void
cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb)
{
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
		struct cifs_tcon *tcon = NULL;

		if (cifs_sb->master_tlink)
			tcon = cifs_sb_master_tcon(cifs_sb);

		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM;
		cifs_sb->mnt_cifs_serverino_autodisabled = true;
		cifs_dbg(VFS, "Autodisabling the use of server inode numbers on %s\n",
			 tcon ? tcon->tree_name : "new server");
		cifs_dbg(VFS, "The server doesn't seem to support them properly or the files might be on different servers (DFS)\n");
		cifs_dbg(VFS, "Hardlinks will not be recognized on this mount. Consider mounting with the \"noserverino\" option to silence this message.\n");

	}
}

void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock)
{
	oplock &= 0xF;

	if (oplock == OPLOCK_EXCLUSIVE) {
		cinode->oplock = CIFS_CACHE_WRITE_FLG | CIFS_CACHE_READ_FLG;
		cifs_dbg(FYI, "Exclusive Oplock granted on inode %p\n",
			 &cinode->netfs.inode);
	} else if (oplock == OPLOCK_READ) {
		cinode->oplock = CIFS_CACHE_READ_FLG;
		cifs_dbg(FYI, "Level II Oplock granted on inode %p\n",
			 &cinode->netfs.inode);
	} else
		cinode->oplock = 0;
}

/*
 * We wait for oplock breaks to be processed before we attempt to perform
 * writes.
 */
int cifs_get_writer(struct cifsInodeInfo *cinode)
{
	int rc;

start:
	rc = wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_OPLOCK_BREAK,
			 TASK_KILLABLE);
	if (rc)
		return rc;

	spin_lock(&cinode->writers_lock);
	if (!cinode->writers)
		set_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
	cinode->writers++;
	/* Check to see if we have started servicing an oplock break */
	if (test_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags)) {
		cinode->writers--;
		if (cinode->writers == 0) {
			clear_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
			wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS);
		}
		spin_unlock(&cinode->writers_lock);
		goto start;
	}
	spin_unlock(&cinode->writers_lock);
	return 0;
}

void cifs_put_writer(struct cifsInodeInfo *cinode)
{
	spin_lock(&cinode->writers_lock);
	cinode->writers--;
	if (cinode->writers == 0) {
		clear_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
		wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS);
	}
	spin_unlock(&cinode->writers_lock);
}
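
/*
 * Illustrative sketch (not part of the original file): write paths
 * bracket the actual write with these helpers so that oplock break
 * processing can quiesce writers:
 *
 *	rc = cifs_get_writer(cinode);
 *	if (rc)
 *		return rc;
 *	<issue the write>
 *	cifs_put_writer(cinode);
 */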

/**
 * cifs_queue_oplock_break - queue the oplock break handler for cfile
 * @cfile: The file to break the oplock on
 *
 * This function is called from the demultiplex thread when it
 * receives an oplock break for @cfile.
 *
 * Assumes the tcon->open_file_lock is held.
 * Assumes cfile->file_info_lock is NOT held.
 */
void cifs_queue_oplock_break(struct cifsFileInfo *cfile)
{
	/*
	 * Bump the handle refcount now while we hold the
	 * open_file_lock to enforce the validity of it for the oplock
	 * break handler. The matching put is done at the end of the
	 * handler.
	 */
	cifsFileInfo_get(cfile);

	queue_work(cifsoplockd_wq, &cfile->oplock_break);
}

void cifs_done_oplock_break(struct cifsInodeInfo *cinode)
{
	clear_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags);
	wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_OPLOCK_BREAK);
}

bool
backup_cred(struct cifs_sb_info *cifs_sb)
{
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID) {
		if (uid_eq(cifs_sb->ctx->backupuid, current_fsuid()))
			return true;
	}
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID) {
		if (in_group_p(cifs_sb->ctx->backupgid))
			return true;
	}

	return false;
}

void
cifs_del_pending_open(struct cifs_pending_open *open)
{
	spin_lock(&tlink_tcon(open->tlink)->open_file_lock);
	list_del(&open->olist);
	spin_unlock(&tlink_tcon(open->tlink)->open_file_lock);
}

void
cifs_add_pending_open_locked(struct cifs_fid *fid, struct tcon_link *tlink,
			     struct cifs_pending_open *open)
{
	memcpy(open->lease_key, fid->lease_key, SMB2_LEASE_KEY_SIZE);
	open->oplock = CIFS_OPLOCK_NO_CHANGE;
	open->tlink = tlink;
	fid->pending_open = open;
	list_add_tail(&open->olist, &tlink_tcon(tlink)->pending_opens);
}

void
cifs_add_pending_open(struct cifs_fid *fid, struct tcon_link *tlink,
		      struct cifs_pending_open *open)
{
	spin_lock(&tlink_tcon(tlink)->open_file_lock);
	cifs_add_pending_open_locked(fid, tlink, open);
	spin_unlock(&tlink_tcon(open->tlink)->open_file_lock);
}
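
/*
 * Illustrative sketch (not part of the original file): open paths
 * register a pending open before sending the request, so that a lease
 * break arriving for the same lease key can be matched, and delete it
 * once the open has completed:
 *
 *	struct cifs_pending_open open;
 *
 *	cifs_add_pending_open(fid, tlink, &open);
 *	rc = <send the open/create request>;
 *	cifs_del_pending_open(&open);
 */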

/*
 * Critical section which runs after acquiring deferred_lock.
 * As there is no reference count on cifs_deferred_close, pdclose
 * should not be used outside deferred_lock.
 */
bool
cifs_is_deferred_close(struct cifsFileInfo *cfile, struct cifs_deferred_close **pdclose)
{
	struct cifs_deferred_close *dclose;

	list_for_each_entry(dclose, &CIFS_I(d_inode(cfile->dentry))->deferred_closes, dlist) {
		if ((dclose->netfid == cfile->fid.netfid) &&
			(dclose->persistent_fid == cfile->fid.persistent_fid) &&
			(dclose->volatile_fid == cfile->fid.volatile_fid)) {
			*pdclose = dclose;
			return true;
		}
	}
	return false;
}

/*
 * Critical section which runs after acquiring deferred_lock.
 */
void
cifs_add_deferred_close(struct cifsFileInfo *cfile, struct cifs_deferred_close *dclose)
{
	bool is_deferred = false;
	struct cifs_deferred_close *pdclose;

	is_deferred = cifs_is_deferred_close(cfile, &pdclose);
	if (is_deferred) {
		kfree(dclose);
		return;
	}

	dclose->tlink = cfile->tlink;
	dclose->netfid = cfile->fid.netfid;
	dclose->persistent_fid = cfile->fid.persistent_fid;
	dclose->volatile_fid = cfile->fid.volatile_fid;
	list_add_tail(&dclose->dlist, &CIFS_I(d_inode(cfile->dentry))->deferred_closes);
}

/*
 * Critical section which runs after acquiring deferred_lock.
 */
void
cifs_del_deferred_close(struct cifsFileInfo *cfile)
{
	bool is_deferred = false;
	struct cifs_deferred_close *dclose;

	is_deferred = cifs_is_deferred_close(cfile, &dclose);
	if (!is_deferred)
		return;
	list_del(&dclose->dlist);
	kfree(dclose);
}
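
/*
 * Illustrative sketch (not part of the original file): per the comments
 * above, all three helpers run with the inode's deferred_lock held:
 *
 *	spin_lock(&cinode->deferred_lock);
 *	cifs_add_deferred_close(cfile, dclose);
 *	spin_unlock(&cinode->deferred_lock);
 */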

void
cifs_close_deferred_file(struct cifsInodeInfo *cifs_inode)
{
	struct cifsFileInfo *cfile = NULL;
	struct file_list *tmp_list, *tmp_next_list;
	struct list_head file_head;

	if (cifs_inode == NULL)
		return;

	INIT_LIST_HEAD(&file_head);
	spin_lock(&cifs_inode->open_file_lock);
	list_for_each_entry(cfile, &cifs_inode->openFileList, flist) {
		if (delayed_work_pending(&cfile->deferred)) {
			if (cancel_delayed_work(&cfile->deferred)) {
				spin_lock(&cifs_inode->deferred_lock);
				cifs_del_deferred_close(cfile);
				spin_unlock(&cifs_inode->deferred_lock);

				tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
				if (tmp_list == NULL)
					break;
				tmp_list->cfile = cfile;
				list_add_tail(&tmp_list->list, &file_head);
			}
		}
	}
	spin_unlock(&cifs_inode->open_file_lock);

	list_for_each_entry_safe(tmp_list, tmp_next_list, &file_head, list) {
		_cifsFileInfo_put(tmp_list->cfile, false, false);
		list_del(&tmp_list->list);
		kfree(tmp_list);
	}
}

void
cifs_close_all_deferred_files(struct cifs_tcon *tcon)
{
	struct cifsFileInfo *cfile;
	struct file_list *tmp_list, *tmp_next_list;
	struct list_head file_head;

	INIT_LIST_HEAD(&file_head);
	spin_lock(&tcon->open_file_lock);
	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
		if (delayed_work_pending(&cfile->deferred)) {
			if (cancel_delayed_work(&cfile->deferred)) {
				spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
				cifs_del_deferred_close(cfile);
				spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);

				tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
				if (tmp_list == NULL)
					break;
				tmp_list->cfile = cfile;
				list_add_tail(&tmp_list->list, &file_head);
			}
		}
	}
	spin_unlock(&tcon->open_file_lock);

	list_for_each_entry_safe(tmp_list, tmp_next_list, &file_head, list) {
		_cifsFileInfo_put(tmp_list->cfile, true, false);
		list_del(&tmp_list->list);
		kfree(tmp_list);
	}
}

void
cifs_close_deferred_file_under_dentry(struct cifs_tcon *tcon, const char *path)
{
	struct cifsFileInfo *cfile;
	struct file_list *tmp_list, *tmp_next_list;
	struct list_head file_head;
	void *page;
	const char *full_path;

	INIT_LIST_HEAD(&file_head);
	page = alloc_dentry_path();
	spin_lock(&tcon->open_file_lock);
	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
		full_path = build_path_from_dentry(cfile->dentry, page);
		if (strstr(full_path, path)) {
			if (delayed_work_pending(&cfile->deferred)) {
				if (cancel_delayed_work(&cfile->deferred)) {
					spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
					cifs_del_deferred_close(cfile);
					spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);

					tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
					if (tmp_list == NULL)
						break;
					tmp_list->cfile = cfile;
					list_add_tail(&tmp_list->list, &file_head);
				}
			}
		}
	}
	spin_unlock(&tcon->open_file_lock);

	list_for_each_entry_safe(tmp_list, tmp_next_list, &file_head, list) {
		_cifsFileInfo_put(tmp_list->cfile, true, false);
		list_del(&tmp_list->list);
		kfree(tmp_list);
	}
	free_dentry_path(page);
}

/* parses DFS referral V3 structure
 * caller is responsible for freeing target_nodes
 * returns:
 * - on success - 0
 * - on failure - errno
 */
int
parse_dfs_referrals(struct get_dfs_referral_rsp *rsp, u32 rsp_size,
		    unsigned int *num_of_nodes,
		    struct dfs_info3_param **target_nodes,
		    const struct nls_table *nls_codepage, int remap,
		    const char *searchName, bool is_unicode)
{
	int i, rc = 0;
	char *data_end;
	struct dfs_referral_level_3 *ref;

	*num_of_nodes = le16_to_cpu(rsp->NumberOfReferrals);

	if (*num_of_nodes < 1) {
865         if (*num_of_nodes < 1) {
866                 cifs_dbg(VFS, "num_referrals: must be at least > 0, but we get num_referrals = %d\n",
867                          *num_of_nodes);
868                 rc = -EINVAL;
869                 goto parse_DFS_referrals_exit;
870         }
871
872         ref = (struct dfs_referral_level_3 *) &(rsp->referrals);
873         if (ref->VersionNumber != cpu_to_le16(3)) {
874                 cifs_dbg(VFS, "Referrals of V%d version are not supported, should be V3\n",
875                          le16_to_cpu(ref->VersionNumber));
876                 rc = -EINVAL;
877                 goto parse_DFS_referrals_exit;
878         }
879
880         /* get the upper boundary of the resp buffer */
881         data_end = (char *)rsp + rsp_size;
882
883         cifs_dbg(FYI, "num_referrals: %d dfs flags: 0x%x ...\n",
884                  *num_of_nodes, le32_to_cpu(rsp->DFSFlags));
885
886         *target_nodes = kcalloc(*num_of_nodes, sizeof(struct dfs_info3_param),
887                                 GFP_KERNEL);
888         if (*target_nodes == NULL) {
889                 rc = -ENOMEM;
890                 goto parse_DFS_referrals_exit;
891         }
892
893         /* collect necessary data from referrals */
894         for (i = 0; i < *num_of_nodes; i++) {
895                 char *temp;
896                 int max_len;
897                 struct dfs_info3_param *node = (*target_nodes)+i;
898
899                 node->flags = le32_to_cpu(rsp->DFSFlags);
900                 if (is_unicode) {
901                         __le16 *tmp = kmalloc(strlen(searchName)*2 + 2,
902                                                 GFP_KERNEL);
903                         if (tmp == NULL) {
904                                 rc = -ENOMEM;
905                                 goto parse_DFS_referrals_exit;
906                         }
907                         cifsConvertToUTF16((__le16 *) tmp, searchName,
908                                            PATH_MAX, nls_codepage, remap);
909                         node->path_consumed = cifs_utf16_bytes(tmp,
910                                         le16_to_cpu(rsp->PathConsumed),
911                                         nls_codepage);
912                         kfree(tmp);
913                 } else
914                         node->path_consumed = le16_to_cpu(rsp->PathConsumed);
915
916                 node->server_type = le16_to_cpu(ref->ServerType);
917                 node->ref_flag = le16_to_cpu(ref->ReferralEntryFlags);
918
919                 /* copy DfsPath */
920                 temp = (char *)ref + le16_to_cpu(ref->DfsPathOffset);
921                 max_len = data_end - temp;
922                 node->path_name = cifs_strndup_from_utf16(temp, max_len,
923                                                 is_unicode, nls_codepage);
924                 if (!node->path_name) {
925                         rc = -ENOMEM;
926                         goto parse_DFS_referrals_exit;
927                 }
928
929                 /* copy link target UNC */
930                 temp = (char *)ref + le16_to_cpu(ref->NetworkAddressOffset);
931                 max_len = data_end - temp;
932                 node->node_name = cifs_strndup_from_utf16(temp, max_len,
933                                                 is_unicode, nls_codepage);
934                 if (!node->node_name) {
935                         rc = -ENOMEM;
936                         goto parse_DFS_referrals_exit;
937                 }
938
939                 node->ttl = le32_to_cpu(ref->TimeToLive);
940
941                 ref++;
942         }
943
944 parse_DFS_referrals_exit:
945         if (rc) {
946                 free_dfs_info_array(*target_nodes, *num_of_nodes);
947                 *target_nodes = NULL;
948                 *num_of_nodes = 0;
949         }
950         return rc;
951 }
952
953 struct cifs_aio_ctx *
954 cifs_aio_ctx_alloc(void)
955 {
956         struct cifs_aio_ctx *ctx;
957
958         /*
959          * Must use kzalloc to initialize ctx->bv to NULL and ctx->direct_io
960          * to false so that we know when we have to unreference pages within
961          * cifs_aio_ctx_release()
962          */
963         ctx = kzalloc(sizeof(struct cifs_aio_ctx), GFP_KERNEL);
964         if (!ctx)
965                 return NULL;
966
967         INIT_LIST_HEAD(&ctx->list);
968         mutex_init(&ctx->aio_mutex);
969         init_completion(&ctx->done);
970         kref_init(&ctx->refcount);
971         return ctx;
972 }
973
974 void
975 cifs_aio_ctx_release(struct kref *refcount)
976 {
977         struct cifs_aio_ctx *ctx = container_of(refcount,
978                                         struct cifs_aio_ctx, refcount);
979
980         cifsFileInfo_put(ctx->cfile);
981
	/*
	 * ctx->bv is only set if setup_aio_ctx_iter() was called
	 * successfully, which means that iov_iter_extract_pages() succeeded
	 * and thus that we may have references or pins on pages that we
	 * need to release.
	 */
	if (ctx->bv) {
		if (ctx->should_dirty || ctx->bv_need_unpin) {
			unsigned int i;

			for (i = 0; i < ctx->nr_pinned_pages; i++) {
				struct page *page = ctx->bv[i].bv_page;

				if (ctx->should_dirty)
					set_page_dirty(page);
				if (ctx->bv_need_unpin)
					unpin_user_page(page);
			}
		}
		kvfree(ctx->bv);
	}

	kfree(ctx);
}
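
/*
 * Illustrative sketch (not part of the original file): the context is
 * reference counted, so i/o paths take and drop references with kref
 * and this release function runs when the last reference is put:
 *
 *	kref_get(&ctx->refcount);
 *	<async i/o in flight>
 *	kref_put(&ctx->refcount, cifs_aio_ctx_release);
 */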

/**
 * cifs_alloc_hash - allocate hash and hash context together
 * @name: The name of the crypto hash algo
 * @sdesc: SHASH descriptor where to put the pointer to the hash TFM
 *
 * The caller has to make sure @sdesc is initialized to either NULL or
 * a valid context. It can be freed via cifs_free_hash().
 */
int
cifs_alloc_hash(const char *name, struct shash_desc **sdesc)
{
	int rc = 0;
	struct crypto_shash *alg = NULL;

	if (*sdesc)
		return 0;

	alg = crypto_alloc_shash(name, 0, 0);
	if (IS_ERR(alg)) {
		cifs_dbg(VFS, "Could not allocate shash TFM '%s'\n", name);
		rc = PTR_ERR(alg);
		*sdesc = NULL;
		return rc;
	}

	*sdesc = kmalloc(sizeof(struct shash_desc) + crypto_shash_descsize(alg), GFP_KERNEL);
	if (*sdesc == NULL) {
		cifs_dbg(VFS, "no memory left to allocate shash TFM '%s'\n", name);
		crypto_free_shash(alg);
		return -ENOMEM;
	}

	(*sdesc)->tfm = alg;
	return 0;
}

/**
 * cifs_free_hash - free hash and hash context together
 * @sdesc: Where to find the pointer to the hash TFM
 *
 * Freeing a NULL descriptor is safe.
 */
void
cifs_free_hash(struct shash_desc **sdesc)
{
	if (unlikely(!sdesc) || !*sdesc)
		return;

	if ((*sdesc)->tfm) {
		crypto_free_shash((*sdesc)->tfm);
		(*sdesc)->tfm = NULL;
	}

	kfree_sensitive(*sdesc);
	*sdesc = NULL;
}
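
/*
 * Illustrative sketch (not part of the original file): a typical digest
 * computation with these helpers, assuming "sha256" is available:
 *
 *	struct shash_desc *sdesc = NULL;
 *
 *	rc = cifs_alloc_hash("sha256", &sdesc);
 *	if (rc)
 *		return rc;
 *	rc = crypto_shash_init(sdesc);
 *	rc = crypto_shash_update(sdesc, data, len);
 *	rc = crypto_shash_final(sdesc, digest);
 *	cifs_free_hash(&sdesc);
 */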

void extract_unc_hostname(const char *unc, const char **h, size_t *len)
{
	const char *end;

	/* skip initial slashes */
	while (*unc && (*unc == '\\' || *unc == '/'))
		unc++;

	end = unc;

	while (*end && !(*end == '\\' || *end == '/'))
		end++;

	*h = unc;
	*len = end - unc;
}
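
/*
 * Example (illustrative): for unc = "\\srv1\share", *h ends up pointing
 * at the 's' of "srv1" and *len == 4, covering just the hostname.
 */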

/**
 * copy_path_name - copy src path to dst, possibly truncating
 * @dst: The destination buffer
 * @src: The source name
 *
 * returns number of bytes written (including trailing nul)
 */
int copy_path_name(char *dst, const char *src)
{
	int name_len;

	/*
	 * PATH_MAX includes nul, so if strlen(src) >= PATH_MAX it
	 * will truncate and strlen(dst) will be PATH_MAX-1
	 */
	name_len = strscpy(dst, src, PATH_MAX);
	if (WARN_ON_ONCE(name_len < 0))
		name_len = PATH_MAX-1;

	/* we count the trailing nul */
	name_len++;
	return name_len;
}
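
/*
 * Example (illustrative): for src = "\\srv\share" (strlen 11), twelve
 * bytes including the trailing nul are copied and 12 is returned;
 * sources of PATH_MAX-1 characters or more are truncated to fit.
 */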

struct super_cb_data {
	void *data;
	struct super_block *sb;
};

static void tcon_super_cb(struct super_block *sb, void *arg)
{
	struct super_cb_data *sd = arg;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *t1 = sd->data, *t2;

	if (sd->sb)
		return;

	cifs_sb = CIFS_SB(sb);
	t2 = cifs_sb_master_tcon(cifs_sb);

	spin_lock(&t2->tc_lock);
	if (t1->ses == t2->ses &&
	    t1->ses->server == t2->ses->server &&
	    t2->origin_fullpath &&
	    dfs_src_pathname_equal(t2->origin_fullpath, t1->origin_fullpath))
		sd->sb = sb;
	spin_unlock(&t2->tc_lock);
}

static struct super_block *__cifs_get_super(void (*f)(struct super_block *, void *),
					    void *data)
{
	struct super_cb_data sd = {
		.data = data,
		.sb = NULL,
	};
	struct file_system_type **fs_type = (struct file_system_type *[]) {
		&cifs_fs_type, &smb3_fs_type, NULL,
	};

	for (; *fs_type; fs_type++) {
		iterate_supers_type(*fs_type, f, &sd);
		if (sd.sb) {
			/*
			 * Grab an active reference in order to prevent
			 * automounts (DFS links) from expiring and then
			 * freeing up our cifs superblock pointer while
			 * we're doing failover.
			 */
			cifs_sb_active(sd.sb);
			return sd.sb;
		}
	}
	pr_warn_once("%s: could not find dfs superblock\n", __func__);
	return ERR_PTR(-EINVAL);
}

static void __cifs_put_super(struct super_block *sb)
{
	if (!IS_ERR_OR_NULL(sb))
		cifs_sb_deactive(sb);
}

struct super_block *cifs_get_dfs_tcon_super(struct cifs_tcon *tcon)
{
	spin_lock(&tcon->tc_lock);
	if (!tcon->origin_fullpath) {
		spin_unlock(&tcon->tc_lock);
		return ERR_PTR(-ENOENT);
	}
	spin_unlock(&tcon->tc_lock);
	return __cifs_get_super(tcon_super_cb, tcon);
}

void cifs_put_tcp_super(struct super_block *sb)
{
	__cifs_put_super(sb);
}

#ifdef CONFIG_CIFS_DFS_UPCALL
int match_target_ip(struct TCP_Server_Info *server,
		    const char *share, size_t share_len,
		    bool *result)
{
	int rc;
	char *target;
	struct sockaddr_storage ss;

	*result = false;

	target = kzalloc(share_len + 3, GFP_KERNEL);
	if (!target)
		return -ENOMEM;

	scnprintf(target, share_len + 3, "\\\\%.*s", (int)share_len, share);

	cifs_dbg(FYI, "%s: target name: %s\n", __func__, target + 2);

	rc = dns_resolve_server_name_to_ip(target, (struct sockaddr *)&ss, NULL);
	kfree(target);

	if (rc < 0)
		return rc;

	spin_lock(&server->srv_lock);
	*result = cifs_match_ipaddr((struct sockaddr *)&server->dstaddr, (struct sockaddr *)&ss);
	spin_unlock(&server->srv_lock);
	cifs_dbg(FYI, "%s: ip addresses match: %u\n", __func__, *result);
	return 0;
}

int cifs_update_super_prepath(struct cifs_sb_info *cifs_sb, char *prefix)
{
	int rc;

	kfree(cifs_sb->prepath);
	cifs_sb->prepath = NULL;

	if (prefix && *prefix) {
		cifs_sb->prepath = cifs_sanitize_prepath(prefix, GFP_ATOMIC);
		if (IS_ERR(cifs_sb->prepath)) {
			rc = PTR_ERR(cifs_sb->prepath);
			cifs_sb->prepath = NULL;
			return rc;
		}
		if (cifs_sb->prepath)
			convert_delimiter(cifs_sb->prepath, CIFS_DIR_SEP(cifs_sb));
	}

	cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
	return 0;
}

/*
 * Handle weird Windows SMB server behaviour. It responds with
 * STATUS_OBJECT_NAME_INVALID code to SMB2 QUERY_INFO request for
 * "\<server>\<dfsname>\<linkpath>" DFS reference, where <dfsname> contains
 * non-ASCII unicode symbols.
 */
int cifs_inval_name_dfs_link_error(const unsigned int xid,
				   struct cifs_tcon *tcon,
				   struct cifs_sb_info *cifs_sb,
				   const char *full_path,
				   bool *islink)
{
	struct cifs_ses *ses = tcon->ses;
	size_t len;
	char *path;
	char *ref_path;

	*islink = false;

	/*
	 * Fast path - skip check when @full_path doesn't have a prefix path to
	 * look up or tcon is not DFS.
	 */
	if (strlen(full_path) < 2 || !cifs_sb ||
	    (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS) ||
	    !is_tcon_dfs(tcon))
		return 0;

	spin_lock(&tcon->tc_lock);
	if (!tcon->origin_fullpath) {
		spin_unlock(&tcon->tc_lock);
		return 0;
	}
	spin_unlock(&tcon->tc_lock);

	/*
	 * Slow path - tcon is DFS and @full_path has a prefix path, so
	 * attempt to get a referral to figure out whether it is a DFS link.
	 */
	len = strnlen(tcon->tree_name, MAX_TREE_SIZE + 1) + strlen(full_path) + 1;
	path = kmalloc(len, GFP_KERNEL);
	if (!path)
		return -ENOMEM;

	scnprintf(path, len, "%s%s", tcon->tree_name, full_path);
	ref_path = dfs_cache_canonical_path(path + 1, cifs_sb->local_nls,
					    cifs_remap(cifs_sb));
	kfree(path);

	if (IS_ERR(ref_path)) {
		if (PTR_ERR(ref_path) != -EINVAL)
			return PTR_ERR(ref_path);
	} else {
		struct dfs_info3_param *refs = NULL;
		int num_refs = 0;

		/*
		 * XXX: we are not using dfs_cache_find() here because we might
		 * end up filling all the DFS cache and thus potentially
		 * removing cached DFS targets that the client would eventually
		 * need during failover.
		 */
		ses = CIFS_DFS_ROOT_SES(ses);
		if (ses->server->ops->get_dfs_refer &&
		    !ses->server->ops->get_dfs_refer(xid, ses, ref_path, &refs,
						     &num_refs, cifs_sb->local_nls,
						     cifs_remap(cifs_sb)))
			*islink = refs[0].server_type == DFS_TYPE_LINK;
		free_dfs_info_array(refs, num_refs);
		kfree(ref_path);
	}
	return 0;
}
#endif

int cifs_wait_for_server_reconnect(struct TCP_Server_Info *server, bool retry)
{
	int timeout = 10;
	int rc;

	spin_lock(&server->srv_lock);
	if (server->tcpStatus != CifsNeedReconnect) {
		spin_unlock(&server->srv_lock);
		return 0;
	}
	timeout *= server->nr_targets;
	spin_unlock(&server->srv_lock);

	/*
	 * Give the demultiplex thread up to 10 seconds for each target
	 * available for reconnect -- should be greater than the cifs socket
	 * timeout which is 7 seconds.
	 *
	 * On "soft" mounts we wait once. Hard mounts keep retrying until
	 * the process is killed or the server comes back on-line.
	 */
	do {
		rc = wait_event_interruptible_timeout(server->response_q,
						      (server->tcpStatus != CifsNeedReconnect),
						      timeout * HZ);
		if (rc < 0) {
			cifs_dbg(FYI, "%s: aborting reconnect due to received signal\n",
				 __func__);
			return -ERESTARTSYS;
		}

		/* are we still trying to reconnect? */
		spin_lock(&server->srv_lock);
		if (server->tcpStatus != CifsNeedReconnect) {
			spin_unlock(&server->srv_lock);
			return 0;
		}
		spin_unlock(&server->srv_lock);
	} while (retry);

	cifs_dbg(FYI, "%s: gave up waiting on reconnect\n", __func__);
	return -EHOSTDOWN;
}