// SPDX-License-Identifier: LGPL-2.1
/*
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2008
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *   Jeremy Allison (jra@samba.org) 2006.
 *
 */

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/wait.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/tcp.h>
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>
#include <asm/processor.h>
#include <linux/mempool.h>
#include <linux/sched/signal.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smb2proto.h"
#include "smbdirect.h"

/* Max number of iovectors we can use off the stack when sending requests. */
#define CIFS_MAX_IOV_SIZE 8

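/*
 * Default mid completion for synchronous requests: callback_data holds the
 * task that issued the request (set in AllocMidQEntry below), so receiving
 * a response simply wakes that task.
 */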
void
cifs_wake_up_task(struct mid_q_entry *mid)
{
	wake_up_process(mid->callback_data);
}

struct mid_q_entry *
AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
	struct mid_q_entry *temp;

	if (server == NULL) {
		cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
		return NULL;
	}

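	/*
	 * mempool_alloc() with a sleeping gfp mask (GFP_NOFS allows direct
	 * reclaim) is guaranteed to succeed eventually, which is why there
	 * is no NULL check before the memset below.
	 */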
	temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
	memset(temp, 0, sizeof(struct mid_q_entry));
	kref_init(&temp->refcount);
	temp->mid = get_mid(smb_buffer);
	temp->pid = current->pid;
	temp->command = cpu_to_le16(smb_buffer->Command);
	cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
	/* easier to use jiffies than do_gettimeofday; note that a mid can be
	   allocated before it is sent */
	temp->when_alloc = jiffies;
	temp->server = server;

	/*
	 * The default is for the mid to be synchronous, so the
	 * default callback just wakes up the current task.
	 */
	get_task_struct(current);
	temp->creator = current;
	temp->callback = cifs_wake_up_task;
	temp->callback_data = current;

	atomic_inc(&midCount);
	temp->mid_state = MID_REQUEST_ALLOCATED;
	return temp;
}

static void _cifs_mid_q_entry_release(struct kref *refcount)
{
	struct mid_q_entry *midEntry =
			container_of(refcount, struct mid_q_entry, refcount);
#ifdef CONFIG_CIFS_STATS2
	__le16 command = midEntry->server->vals->lock_cmd;
	__u16 smb_cmd = le16_to_cpu(midEntry->command);
	unsigned long now;
	unsigned long roundtrip_time;
#endif
	struct TCP_Server_Info *server = midEntry->server;

	if (midEntry->resp_buf && (midEntry->mid_flags & MID_WAIT_CANCELLED) &&
	    midEntry->mid_state == MID_RESPONSE_RECEIVED &&
	    server->ops->handle_cancelled_mid)
		server->ops->handle_cancelled_mid(midEntry, server);

	midEntry->mid_state = MID_FREE;
	atomic_dec(&midCount);
	if (midEntry->large_buf)
		cifs_buf_release(midEntry->resp_buf);
	else
		cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
	now = jiffies;
	if (now < midEntry->when_alloc)
		cifs_server_dbg(VFS, "Invalid mid allocation time\n");
	roundtrip_time = now - midEntry->when_alloc;

	if (smb_cmd < NUMBER_OF_SMB2_COMMANDS) {
		if (atomic_read(&server->num_cmds[smb_cmd]) == 0) {
			server->slowest_cmd[smb_cmd] = roundtrip_time;
			server->fastest_cmd[smb_cmd] = roundtrip_time;
		} else {
			if (server->slowest_cmd[smb_cmd] < roundtrip_time)
				server->slowest_cmd[smb_cmd] = roundtrip_time;
			else if (server->fastest_cmd[smb_cmd] > roundtrip_time)
				server->fastest_cmd[smb_cmd] = roundtrip_time;
		}
		cifs_stats_inc(&server->num_cmds[smb_cmd]);
		server->time_per_cmd[smb_cmd] += roundtrip_time;
	}
	/*
	 * Commands taking longer than one second (the default threshold) can
	 * indicate that something is wrong, unless the link is quite slow or
	 * the server very busy. This calculation is unlikely or impossible to
	 * wrap as long as slow_rsp_threshold is not set far above the
	 * recommended maximum (32767, i.e. 9 hours), and it is generally
	 * harmless even if wrong since it only affects debug counters - so
	 * leave it as a simple comparison rather than doing multiple
	 * conversions and overflow checks.
	 */
	if ((slow_rsp_threshold != 0) &&
	    time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
	    (midEntry->command != command)) {
		/*
		 * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
		 * NB: le16_to_cpu returns unsigned so can not be negative below
		 */
		if (smb_cmd < NUMBER_OF_SMB2_COMMANDS)
			cifs_stats_inc(&server->smb2slowcmd[smb_cmd]);

		trace_smb3_slow_rsp(smb_cmd, midEntry->mid, midEntry->pid,
			       midEntry->when_sent, midEntry->when_received);
		if (cifsFYI & CIFS_TIMER) {
			pr_debug("slow rsp: cmd %d mid %llu",
				 midEntry->command, midEntry->mid);
			cifs_info("A: 0x%lx S: 0x%lx R: 0x%lx\n",
				  now - midEntry->when_alloc,
				  now - midEntry->when_sent,
				  now - midEntry->when_received);
		}
	}
#endif
	put_task_struct(midEntry->creator);

	mempool_free(midEntry, cifs_mid_poolp);
}

void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
{
	spin_lock(&GlobalMid_Lock);
	kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
	spin_unlock(&GlobalMid_Lock);
}

void DeleteMidQEntry(struct mid_q_entry *midEntry)
{
	cifs_mid_q_entry_release(midEntry);
}

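/*
 * Remove the mid from the pending queue (unless the demultiplex thread has
 * already unlinked it and set MID_DELETED) and then drop our reference.
 */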
void
cifs_delete_mid(struct mid_q_entry *mid)
{
	spin_lock(&GlobalMid_Lock);
	if (!(mid->mid_flags & MID_DELETED)) {
		list_del_init(&mid->qhead);
		mid->mid_flags |= MID_DELETED;
	}
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
}

/*
 * smb_send_kvec - send an array of kvecs to the server
 * @server:     Server to send the data to
 * @smb_msg:    Message to send
 * @sent:       amount of data sent on socket is stored here
 *
 * Our basic "send data to server" function. Should be called with srv_mutex
 * held. The caller is responsible for handling the results.
 */
static int
smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
	      size_t *sent)
{
	int rc = 0;
	int retries = 0;
	struct socket *ssocket = server->ssocket;

	*sent = 0;

	smb_msg->msg_name = (struct sockaddr *) &server->dstaddr;
	smb_msg->msg_namelen = sizeof(struct sockaddr);
	smb_msg->msg_control = NULL;
	smb_msg->msg_controllen = 0;
	if (server->noblocksnd)
		smb_msg->msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
	else
		smb_msg->msg_flags = MSG_NOSIGNAL;

	while (msg_data_left(smb_msg)) {
		/*
		 * If blocking send, we try 3 times, since each can block
		 * for 5 seconds. For nonblocking we have to try more
		 * but wait increasing amounts of time allowing time for
		 * socket to clear. The overall time we wait in either
		 * case to send on the socket is about 15 seconds.
		 * Similarly we wait for 15 seconds for a response from
		 * the server in SendReceive[2] for the server to send
		 * a response back for most types of requests (except
		 * SMB Write past end of file which can be slow, and
		 * blocking lock operations). NFS waits slightly longer
		 * than CIFS, but this can make it take longer for
		 * nonresponsive servers to be detected and 15 seconds
		 * is more than enough time for modern networks to
		 * send a packet. In most cases if we fail to send
		 * after the retries we will kill the socket and
		 * reconnect which may clear the network problem.
		 */
		rc = sock_sendmsg(ssocket, smb_msg);
		if (rc == -EAGAIN) {
			retries++;
			if (retries >= 14 ||
			    (!server->noblocksnd && (retries > 2))) {
				cifs_server_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
					 ssocket);
				return -EAGAIN;
			}
			msleep(1 << retries);
			continue;
		}

		if (rc < 0)
			return rc;

		if (rc == 0) {
			/* should never happen, letting socket clear before
			   retrying is our only obvious option here */
			cifs_server_dbg(VFS, "tcp sent no data\n");
			msleep(500);
			continue;
		}

		/* send was at least partially successful */
		*sent += rc;
		retries = 0; /* in case we get ENOSPC on the next send */
	}
	return 0;
}

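/*
 * smb_rqst_len - total number of bytes a request will occupy on the wire.
 * For SMB2+ (no preamble) the leading 4-byte RFC1002 length iov is skipped,
 * since __smb_send_rqst generates that marker itself.
 */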
unsigned long
smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	unsigned int i;
	struct kvec *iov;
	int nvec;
	unsigned long buflen = 0;

	if (server->vals->header_preamble_size == 0 &&
	    rqst->rq_nvec >= 2 && rqst->rq_iov[0].iov_len == 4) {
		iov = &rqst->rq_iov[1];
		nvec = rqst->rq_nvec - 1;
	} else {
		iov = rqst->rq_iov;
		nvec = rqst->rq_nvec;
	}

	/* total up iov array first */
	for (i = 0; i < nvec; i++)
		buflen += iov[i].iov_len;

	/*
	 * Add in the page array if there is one. The caller needs to make
	 * sure rq_offset and rq_tailsz are set correctly. If a buffer of
	 * multiple pages ends at page boundary, rq_tailsz needs to be set to
	 * PAGE_SIZE.
	 */
	if (rqst->rq_npages) {
		if (rqst->rq_npages == 1)
			buflen += rqst->rq_tailsz;
		else {
			/*
			 * If there is more than one page, calculate the
			 * buffer length based on rq_offset and rq_tailsz
			 */
			buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
					rqst->rq_offset;
			buflen += rqst->rq_tailsz;
		}
	}

	return buflen;
}

static int
__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
		struct smb_rqst *rqst)
{
	int rc = 0;
	struct kvec *iov;
	int n_vec;
	unsigned int send_length = 0;
	unsigned int i, j;
	sigset_t mask, oldmask;
	size_t total_len = 0, sent, size;
	struct socket *ssocket = server->ssocket;
	struct msghdr smb_msg;
	__be32 rfc1002_marker;

	if (cifs_rdma_enabled(server)) {
		/* return -EAGAIN when connecting or reconnecting */
		rc = -EAGAIN;
		if (server->smbd_conn)
			rc = smbd_send(server, num_rqst, rqst);
		goto smbd_done;
	}

	if (ssocket == NULL)
		return -EAGAIN;

	if (fatal_signal_pending(current)) {
		cifs_dbg(FYI, "signal pending before send request\n");
		return -ERESTARTSYS;
	}

	/* cork the socket */
	tcp_sock_set_cork(ssocket->sk, true);

	for (j = 0; j < num_rqst; j++)
		send_length += smb_rqst_len(server, &rqst[j]);
	rfc1002_marker = cpu_to_be32(send_length);

	/*
	 * We should not allow signals to interrupt the network send because
	 * any partial send will cause session reconnects thus increasing
	 * latency of system calls and overload a server with unnecessary
	 * requests.
	 */

	sigfillset(&mask);
	sigprocmask(SIG_BLOCK, &mask, &oldmask);

	/* Generate a rfc1002 marker for SMB2+ */
	if (server->vals->header_preamble_size == 0) {
		struct kvec hiov = {
			.iov_base = &rfc1002_marker,
			.iov_len  = 4
		};
		iov_iter_kvec(&smb_msg.msg_iter, WRITE, &hiov, 1, 4);
		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto unmask;

		total_len += sent;
		send_length += 4;
	}

	cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);

	for (j = 0; j < num_rqst; j++) {
		iov = rqst[j].rq_iov;
		n_vec = rqst[j].rq_nvec;

		size = 0;
		for (i = 0; i < n_vec; i++) {
			dump_smb(iov[i].iov_base, iov[i].iov_len);
			size += iov[i].iov_len;
		}

		iov_iter_kvec(&smb_msg.msg_iter, WRITE, iov, n_vec, size);

		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto unmask;

		total_len += sent;

		/* now walk the page array and send each page in it */
		for (i = 0; i < rqst[j].rq_npages; i++) {
			struct bio_vec bvec;

			bvec.bv_page = rqst[j].rq_pages[i];
			rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
					     &bvec.bv_offset);

			iov_iter_bvec(&smb_msg.msg_iter, WRITE,
				      &bvec, 1, bvec.bv_len);
			rc = smb_send_kvec(server, &smb_msg, &sent);
			if (rc < 0)
				break;

			total_len += sent;
		}
	}

unmask:
	sigprocmask(SIG_SETMASK, &oldmask, NULL);

	/*
	 * If a signal is pending but we have already sent the whole packet to
	 * the server, return success so that the corresponding mid entry is
	 * kept in the pending requests queue and the client can still handle
	 * the server's response.
	 *
	 * If only part of the packet has been sent, there is no need to hide
	 * the interrupt because the session will be reconnected anyway, so
	 * there won't be any response from the server to handle.
	 */

	if (signal_pending(current) && (total_len != send_length)) {
		cifs_dbg(FYI, "signal is pending after attempt to send\n");
		rc = -ERESTARTSYS;
	}

	/* uncork it */
	tcp_sock_set_cork(ssocket->sk, false);

	if ((total_len > 0) && (total_len != send_length)) {
		cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
			 send_length, total_len);
		/*
		 * If we have only sent part of an SMB then the next SMB could
		 * be taken as the remainder of this one. We need to kill the
		 * socket so the server throws away the partial SMB
		 */
		spin_lock(&GlobalMid_Lock);
		server->tcpStatus = CifsNeedReconnect;
		spin_unlock(&GlobalMid_Lock);
		trace_smb3_partial_send_reconnect(server->CurrentMid,
						  server->conn_id, server->hostname);
	}
smbd_done:
	if (rc < 0 && rc != -EINTR)
		cifs_server_dbg(VFS, "Error %d sending data on socket to server\n",
			 rc);
	else if (rc > 0)
		rc = 0;

	return rc;
}

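/*
 * Encrypt a request when CIFS_TRANSFORM_REQ is set: the transform header is
 * prepended as its own one-iov rqst and init_transform_rq builds the
 * encrypted chain that is actually put on the wire.
 */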
static int
smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
	      struct smb_rqst *rqst, int flags)
{
	struct kvec iov;
	struct smb2_transform_hdr *tr_hdr;
	struct smb_rqst cur_rqst[MAX_COMPOUND];
	int rc;

	if (!(flags & CIFS_TRANSFORM_REQ))
		return __smb_send_rqst(server, num_rqst, rqst);

	if (num_rqst > MAX_COMPOUND - 1)
		return -ENOMEM;

	if (!server->ops->init_transform_rq) {
		cifs_server_dbg(VFS, "Encryption requested but transform callback is missing\n");
		return -EIO;
	}

	tr_hdr = kmalloc(sizeof(*tr_hdr), GFP_NOFS);
	if (!tr_hdr)
		return -ENOMEM;

	memset(&cur_rqst[0], 0, sizeof(cur_rqst));
	memset(&iov, 0, sizeof(iov));
	memset(tr_hdr, 0, sizeof(*tr_hdr));

	iov.iov_base = tr_hdr;
	iov.iov_len = sizeof(*tr_hdr);
	cur_rqst[0].rq_iov = &iov;
	cur_rqst[0].rq_nvec = 1;

	rc = server->ops->init_transform_rq(server, num_rqst + 1,
					    &cur_rqst[0], rqst);
	if (rc)
		goto out;

	rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
	smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
out:
	kfree(tr_hdr);
	return rc;
}

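/*
 * Send a single SMB frame: iov[0] carries the 4-byte RFC1002 length field
 * and iov[1] the rest of the packet, matching the layout callers built.
 */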
int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
	 unsigned int smb_buf_length)
{
	struct kvec iov[2];
	struct smb_rqst rqst = { .rq_iov = iov,
				 .rq_nvec = 2 };

	iov[0].iov_base = smb_buffer;
	iov[0].iov_len = 4;
	iov[1].iov_base = (char *)smb_buffer + 4;
	iov[1].iov_len = smb_buf_length;

	return __smb_send_rqst(server, 1, &rqst);
}

static int
wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
		      const int timeout, const int flags,
		      unsigned int *instance)
{
	long rc;
	int *credits;
	int optype;
	long int t;
	int scredits, in_flight;

	if (timeout < 0)
		t = MAX_JIFFY_OFFSET;
	else
		t = msecs_to_jiffies(timeout);

	optype = flags & CIFS_OP_MASK;

	*instance = 0;

	credits = server->ops->get_credits_field(server, optype);
	/* Since an echo is already inflight, no need to wait to send another */
	if (*credits <= 0 && optype == CIFS_ECHO_OP)
		return -EAGAIN;

	spin_lock(&server->req_lock);
	if ((flags & CIFS_TIMEOUT_MASK) == CIFS_NON_BLOCKING) {
		/* oplock breaks must not be held up */
		server->in_flight++;
		if (server->in_flight > server->max_in_flight)
			server->max_in_flight = server->in_flight;
		*credits -= 1;
		*instance = server->reconnect_instance;
		scredits = *credits;
		in_flight = server->in_flight;
		spin_unlock(&server->req_lock);

		trace_smb3_add_credits(server->CurrentMid,
				server->conn_id, server->hostname, scredits, -1, in_flight);
		cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
				__func__, 1, scredits);

		return 0;
	}

	while (1) {
		if (*credits < num_credits) {
			scredits = *credits;
			spin_unlock(&server->req_lock);

			cifs_num_waiters_inc(server);
			rc = wait_event_killable_timeout(server->request_q,
				has_credits(server, credits, num_credits), t);
			cifs_num_waiters_dec(server);
			if (!rc) {
				spin_lock(&server->req_lock);
				scredits = *credits;
				in_flight = server->in_flight;
				spin_unlock(&server->req_lock);

				trace_smb3_credit_timeout(server->CurrentMid,
						server->conn_id, server->hostname, scredits,
						num_credits, in_flight);
				cifs_server_dbg(VFS, "wait timed out after %d ms\n",
						timeout);
				return -EBUSY;
			}
			if (rc == -ERESTARTSYS)
				return -ERESTARTSYS;
			spin_lock(&server->req_lock);
		} else {
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&server->req_lock);
				return -ENOENT;
			}

			/*
			 * For normal commands, reserve the last MAX_COMPOUND
			 * credits for compound requests.
			 * Otherwise those compounds could be permanently
			 * starved for credits by single-credit requests.
			 *
			 * To prevent spinning the CPU, block this thread until
			 * there are >MAX_COMPOUND credits available.
			 * But only do this if we already have a lot of
			 * credits in flight, to avoid triggering this check
			 * for servers that are slow to hand out credits on
			 * new sessions.
			 */
			if (!optype && num_credits == 1 &&
			    server->in_flight > 2 * MAX_COMPOUND &&
			    *credits <= MAX_COMPOUND) {
				spin_unlock(&server->req_lock);

				cifs_num_waiters_inc(server);
				rc = wait_event_killable_timeout(
					server->request_q,
					has_credits(server, credits,
						    MAX_COMPOUND + 1),
					t);
				cifs_num_waiters_dec(server);
				if (!rc) {
					spin_lock(&server->req_lock);
					scredits = *credits;
					in_flight = server->in_flight;
					spin_unlock(&server->req_lock);

					trace_smb3_credit_timeout(
							server->CurrentMid,
							server->conn_id, server->hostname,
							scredits, num_credits, in_flight);
					cifs_server_dbg(VFS, "wait timed out after %d ms\n",
							timeout);
					return -EBUSY;
				}
				if (rc == -ERESTARTSYS)
					return -ERESTARTSYS;
				spin_lock(&server->req_lock);
				continue;
			}

			/*
			 * Can not count locking commands against total
			 * as they are allowed to block on server.
			 */

			/* update # of requests on the wire to server */
			if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
				*credits -= num_credits;
				server->in_flight += num_credits;
				if (server->in_flight > server->max_in_flight)
					server->max_in_flight = server->in_flight;
				*instance = server->reconnect_instance;
			}
			scredits = *credits;
			in_flight = server->in_flight;
			spin_unlock(&server->req_lock);

			trace_smb3_add_credits(server->CurrentMid,
					server->conn_id, server->hostname, scredits,
					-(num_credits), in_flight);
			cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
					__func__, num_credits, scredits);
			break;
		}
	}
	return 0;
}

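/* Block indefinitely (negative timeout maps to MAX_JIFFY_OFFSET above)
   until a single credit becomes available. */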
static int
wait_for_free_request(struct TCP_Server_Info *server, const int flags,
		      unsigned int *instance)
{
	return wait_for_free_credits(server, 1, -1, flags,
				     instance);
}

static int
wait_for_compound_request(struct TCP_Server_Info *server, int num,
			  const int flags, unsigned int *instance)
{
	int *credits;
	int scredits, in_flight;

	credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK);

	spin_lock(&server->req_lock);
	scredits = *credits;
	in_flight = server->in_flight;

	if (*credits < num) {
		/*
		 * If the server is tight on resources or just gives us less
		 * credits for other reasons (e.g. requests are coming out of
		 * order and the server delays granting more credits until it
		 * processes a missing mid) and we exhausted most available
		 * credits there may be situations when we try to send
		 * a compound request but we don't have enough credits. At this
		 * point the client needs to decide if it should wait for
		 * additional credits or fail the request. If at least one
		 * request is in flight there is a high probability that the
		 * server will return enough credits to satisfy this compound
		 * request.
		 *
		 * Return immediately if no requests in flight since we will be
		 * stuck on waiting for credits.
		 */
		if (server->in_flight == 0) {
			spin_unlock(&server->req_lock);
			trace_smb3_insufficient_credits(server->CurrentMid,
					server->conn_id, server->hostname, scredits,
					num, in_flight);
			cifs_dbg(FYI, "%s: %d requests in flight, needed %d total=%d\n",
					__func__, in_flight, num, scredits);
			return -EDEADLK;
		}
	}
	spin_unlock(&server->req_lock);

	return wait_for_free_credits(server, num, 60000, flags,
				     instance);
}

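/*
 * CIFS (SMB1) variant of wait_mtu_credits: the requested size is granted
 * in full and no credits are consumed.
 */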
int
cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
		      unsigned int *num, struct cifs_credits *credits)
{
	*num = size;
	credits->value = 0;
	credits->instance = server->reconnect_instance;
	return 0;
}

static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
			struct mid_q_entry **ppmidQ)
{
	if (ses->server->tcpStatus == CifsExiting) {
		return -ENOENT;
	}

	if (ses->server->tcpStatus == CifsNeedReconnect) {
		cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
		return -EAGAIN;
	}

	if (ses->status == CifsNew) {
		if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
			(in_buf->Command != SMB_COM_NEGOTIATE))
			return -EAGAIN;
		/* else ok - we are setting up session */
	}

	if (ses->status == CifsExiting) {
		/* check if SMB session is bad because we are setting it up */
		if (in_buf->Command != SMB_COM_LOGOFF_ANDX)
			return -EAGAIN;
		/* else ok - we are shutting down session */
	}

	*ppmidQ = AllocMidQEntry(in_buf, ses->server);
	if (*ppmidQ == NULL)
		return -ENOMEM;
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);
	return 0;
}

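/*
 * Sleep (freezable, killable) until the demultiplex thread moves the mid out
 * of MID_REQUEST_SUBMITTED, i.e. a response arrived or the mid was failed.
 */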
static int
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
	int error;

	error = wait_event_freezekillable_unsafe(server->response_q,
				    midQ->mid_state != MID_REQUEST_SUBMITTED);
	if (error < 0)
		return -ERESTARTSYS;

	return 0;
}

struct mid_q_entry *
cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	/* enable signing if server requires it */
	if (server->sign)
		hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

	mid = AllocMidQEntry(hdr, server);
	if (mid == NULL)
		return ERR_PTR(-ENOMEM);

	rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
	if (rc) {
		DeleteMidQEntry(mid);
		return ERR_PTR(rc);
	}

	return mid;
}

/*
 * Send an SMB request and set the callback function in the mid to handle
 * the result. The caller is responsible for dealing with timeouts.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
		mid_receive_t *receive, mid_callback_t *callback,
		mid_handle_t *handle, void *cbdata, const int flags,
		const struct cifs_credits *exist_credits)
{
	int rc;
	struct mid_q_entry *mid;
	struct cifs_credits credits = { .value = 0, .instance = 0 };
	unsigned int instance;
	int optype;

	optype = flags & CIFS_OP_MASK;

	if ((flags & CIFS_HAS_CREDITS) == 0) {
		rc = wait_for_free_request(server, flags, &instance);
		if (rc)
			return rc;
		credits.value = 1;
		credits.instance = instance;
	} else
		instance = exist_credits->instance;

	mutex_lock(&server->srv_mutex);

	/*
	 * We can't use credits obtained from the previous session to send this
	 * request. Check if there were reconnects after we obtained credits and
	 * return -EAGAIN in such cases to let callers handle it.
	 */
	if (instance != server->reconnect_instance) {
		mutex_unlock(&server->srv_mutex);
		add_credits_and_wake_if(server, &credits, optype);
		return -EAGAIN;
	}

	mid = server->ops->setup_async_request(server, rqst);
	if (IS_ERR(mid)) {
		mutex_unlock(&server->srv_mutex);
		add_credits_and_wake_if(server, &credits, optype);
		return PTR_ERR(mid);
	}

	mid->receive = receive;
	mid->callback = callback;
	mid->callback_data = cbdata;
	mid->handle = handle;
	mid->mid_state = MID_REQUEST_SUBMITTED;

	/* put it on the pending_mid_q */
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&mid->qhead, &server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);

	/*
	 * Need to store the time in mid before calling I/O. For call_async,
	 * I/O response may come back and free the mid entry on another thread.
	 */
	cifs_save_when_sent(mid);
	cifs_in_send_inc(server);
	rc = smb_send_rqst(server, 1, rqst, flags);
	cifs_in_send_dec(server);

	if (rc < 0) {
		revert_current_mid(server, mid->credits);
		server->sequence_number -= 2;
		cifs_delete_mid(mid);
	}

	mutex_unlock(&server->srv_mutex);

	if (rc == 0)
		return 0;

	add_credits_and_wake_if(server, &credits, optype);
	return rc;
}

/*
 * Send an SMB request. No response info (other than the return code) needs
 * to be parsed.
 *
 * flags indicate the type of request buffer, how long to wait, and whether
 * to log the NT STATUS code (error) before mapping it to a POSIX error.
 */
int
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
		 char *in_buf, int flags)
{
	int rc;
	struct kvec iov[1];
	struct kvec rsp_iov;
	int resp_buf_type;

	iov[0].iov_base = in_buf;
	iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
	flags |= CIFS_NO_RSP_BUF;
	rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
	cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);

	return rc;
}

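/*
 * Translate the final mid state into an errno once the wait has finished;
 * for unexpected states the mid is also unlinked from the pending queue.
 */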
static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
	int rc = 0;

	cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
		 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);

	spin_lock(&GlobalMid_Lock);
	switch (mid->mid_state) {
	case MID_RESPONSE_RECEIVED:
		spin_unlock(&GlobalMid_Lock);
		return rc;
	case MID_RETRY_NEEDED:
		rc = -EAGAIN;
		break;
	case MID_RESPONSE_MALFORMED:
		rc = -EIO;
		break;
	case MID_SHUTDOWN:
		rc = -EHOSTDOWN;
		break;
	default:
		if (!(mid->mid_flags & MID_DELETED)) {
			list_del_init(&mid->qhead);
			mid->mid_flags |= MID_DELETED;
		}
		cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
			 __func__, mid->mid, mid->mid_state);
		rc = -EIO;
	}
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
	return rc;
}

static inline int
send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
	    struct mid_q_entry *mid)
{
	return server->ops->send_cancel ?
				server->ops->send_cancel(server, rqst, mid) : 0;
}

int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
		   bool log_error)
{
	unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;

	dump_smb(mid->resp_buf, min_t(u32, 92, len));

	/* rebuild the response as an rqst so the signature can be verified */
	if (server->sign) {
		struct kvec iov[2];
		int rc = 0;
		struct smb_rqst rqst = { .rq_iov = iov,
					 .rq_nvec = 2 };

		iov[0].iov_base = mid->resp_buf;
		iov[0].iov_len = 4;
		iov[1].iov_base = (char *)mid->resp_buf + 4;
		iov[1].iov_len = len - 4;
		/* FIXME: add code to kill session */
		rc = cifs_verify_signature(&rqst, server,
					   mid->sequence_number);
		if (rc)
			cifs_server_dbg(VFS, "SMB signature verification returned error = %d\n",
				 rc);
	}

	/* BB special case reconnect tid and uid here? */
	return map_and_check_smb_error(mid, log_error);
}

struct mid_q_entry *
cifs_setup_request(struct cifs_ses *ses, struct TCP_Server_Info *ignored,
		   struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	rc = allocate_mid(ses, hdr, &mid);
	if (rc)
		return ERR_PTR(rc);
	rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
	if (rc) {
		cifs_delete_mid(mid);
		return ERR_PTR(rc);
	}
	return mid;
}

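/*
 * Per-mid completion for compound chains: return the credits the server
 * granted for this part of the chain. Only the last part also wakes the
 * issuing thread (cifs_compound_last_callback), and a cancelled mid frees
 * itself after returning credits (cifs_cancelled_callback).
 */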
static void
cifs_compound_callback(struct mid_q_entry *mid)
{
	struct TCP_Server_Info *server = mid->server;
	struct cifs_credits credits;

	credits.value = server->ops->get_credits(mid);
	credits.instance = server->reconnect_instance;

	add_credits(server, &credits, mid->optype);
}

static void
cifs_compound_last_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	cifs_wake_up_task(mid);
}

static void
cifs_cancelled_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	DeleteMidQEntry(mid);
}

/*
 * Return a channel (master if none) of @ses that can be used to send
 * regular requests.
 *
 * If we are currently binding a new channel (negprot/sess.setup),
 * return the new incomplete channel.
 */
struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
{
	uint index = 0;

	if (!ses)
		return NULL;

	if (!ses->binding) {
		/* round robin */
		if (ses->chan_count > 1) {
			index = (uint)atomic_inc_return(&ses->chan_seq);
			index %= ses->chan_count;
		}
		return ses->chans[index].server;
	} else {
		return cifs_ses_server(ses);
	}
}

int
compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
		   struct TCP_Server_Info *server,
		   const int flags, const int num_rqst, struct smb_rqst *rqst,
		   int *resp_buf_type, struct kvec *resp_iov)
{
	int i, j, optype, rc = 0;
	struct mid_q_entry *midQ[MAX_COMPOUND];
	bool cancelled_mid[MAX_COMPOUND] = {false};
	struct cifs_credits credits[MAX_COMPOUND] = {
		{ .value = 0, .instance = 0 }
	};
	unsigned int instance;
	char *buf;

	optype = flags & CIFS_OP_MASK;

	for (i = 0; i < num_rqst; i++)
		resp_buf_type[i] = CIFS_NO_BUFFER;  /* no response buf yet */

	if (!ses || !ses->server || !server) {
		cifs_dbg(VFS, "Null session\n");
		return -EIO;
	}

	if (server->tcpStatus == CifsExiting)
		return -ENOENT;

	/*
	 * Wait for all the requests to become available.
	 * This approach still leaves the possibility to be stuck waiting for
	 * credits if the server doesn't grant credits to the outstanding
	 * requests and if the client is completely idle, not generating any
	 * other requests.
	 * This can be handled by the eventual session reconnect.
	 */
	rc = wait_for_compound_request(server, num_rqst, flags,
				       &instance);
	if (rc)
		return rc;

	for (i = 0; i < num_rqst; i++) {
		credits[i].value = 1;
		credits[i].instance = instance;
	}

	/*
	 * Make sure that we sign in the same order that we send on this socket
	 * and avoid races inside tcp sendmsg code that could cause corruption
	 * of smb data.
	 */

	mutex_lock(&server->srv_mutex);

	/*
	 * All the parts of the compound chain must use credits obtained from
	 * the same session. We can not use credits obtained from the previous
	 * session to send this request. Check if there were reconnects after
	 * we obtained credits and return -EAGAIN in such cases to let callers
	 * handle it.
	 */
	if (instance != server->reconnect_instance) {
		mutex_unlock(&server->srv_mutex);
		for (j = 0; j < num_rqst; j++)
			add_credits(server, &credits[j], optype);
		return -EAGAIN;
	}

	for (i = 0; i < num_rqst; i++) {
		midQ[i] = server->ops->setup_request(ses, server, &rqst[i]);
		if (IS_ERR(midQ[i])) {
			revert_current_mid(server, i);
			for (j = 0; j < i; j++)
				cifs_delete_mid(midQ[j]);
			mutex_unlock(&server->srv_mutex);

			/* Update # of requests on wire to server */
			for (j = 0; j < num_rqst; j++)
				add_credits(server, &credits[j], optype);
			return PTR_ERR(midQ[i]);
		}

		midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
		midQ[i]->optype = optype;
		/*
		 * Invoke callback for every part of the compound chain
		 * to calculate credits properly. Wake up this thread only when
		 * the last element is received.
		 */
		if (i < num_rqst - 1)
			midQ[i]->callback = cifs_compound_callback;
		else
			midQ[i]->callback = cifs_compound_last_callback;
	}
	cifs_in_send_inc(server);
	rc = smb_send_rqst(server, num_rqst, rqst, flags);
	cifs_in_send_dec(server);

	for (i = 0; i < num_rqst; i++)
		cifs_save_when_sent(midQ[i]);

	if (rc < 0) {
		revert_current_mid(server, num_rqst);
		server->sequence_number -= 2;
	}

	mutex_unlock(&server->srv_mutex);

	/*
	 * If sending failed for some reason or it is an oplock break that we
	 * will not receive a response to - return credits back
	 */
	if (rc < 0 || (flags & CIFS_NO_SRV_RSP)) {
		for (i = 0; i < num_rqst; i++)
			add_credits(server, &credits[i], optype);
		goto out;
	}

	/*
	 * At this point the request is passed to the network stack - we assume
	 * that any credits taken from the server structure on the client have
	 * been spent and we can't return them back. Once we receive responses
	 * we will collect credits granted by the server in the mid callbacks
	 * and add those credits to the server structure.
	 */

	/*
	 * Compounding is never used during session establish.
	 */
	if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
		mutex_lock(&server->srv_mutex);
		smb311_update_preauth_hash(ses, rqst[0].rq_iov,
					   rqst[0].rq_nvec);
		mutex_unlock(&server->srv_mutex);
	}

	for (i = 0; i < num_rqst; i++) {
		rc = wait_for_response(server, midQ[i]);
		if (rc != 0)
			break;
	}
	if (rc != 0) {
		for (; i < num_rqst; i++) {
			cifs_server_dbg(FYI, "Cancelling wait for mid %llu cmd: %d\n",
				 midQ[i]->mid, le16_to_cpu(midQ[i]->command));
			send_cancel(server, &rqst[i], midQ[i]);
			spin_lock(&GlobalMid_Lock);
			midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
			if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
				midQ[i]->callback = cifs_cancelled_callback;
				cancelled_mid[i] = true;
				credits[i].value = 0;
			}
			spin_unlock(&GlobalMid_Lock);
		}
	}

	for (i = 0; i < num_rqst; i++) {
		if (rc < 0)
			goto out;

		rc = cifs_sync_mid_result(midQ[i], server);
		if (rc != 0) {
			/* mark this mid as cancelled to not free it below */
			cancelled_mid[i] = true;
			goto out;
		}

		if (!midQ[i]->resp_buf ||
		    midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
			rc = -EIO;
			cifs_dbg(FYI, "Bad MID state?\n");
			goto out;
		}

		buf = (char *)midQ[i]->resp_buf;
		resp_iov[i].iov_base = buf;
		resp_iov[i].iov_len = midQ[i]->resp_buf_size +
			server->vals->header_preamble_size;

		if (midQ[i]->large_buf)
			resp_buf_type[i] = CIFS_LARGE_BUFFER;
		else
			resp_buf_type[i] = CIFS_SMALL_BUFFER;

		rc = server->ops->check_receive(midQ[i], server,
						     flags & CIFS_LOG_ERROR);

		/* mark it so buf will not be freed by cifs_delete_mid */
		if ((flags & CIFS_NO_RSP_BUF) == 0)
			midQ[i]->resp_buf = NULL;

	}

	/*
	 * Compounding is never used during session establish.
	 */
	if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
		struct kvec iov = {
			.iov_base = resp_iov[0].iov_base,
			.iov_len = resp_iov[0].iov_len
		};
		mutex_lock(&server->srv_mutex);
		smb311_update_preauth_hash(ses, &iov, 1);
		mutex_unlock(&server->srv_mutex);
	}

out:
	/*
	 * This will dequeue all mids. After this it is important that the
	 * demultiplex_thread will not process any of these mids any further.
	 * This is prevented above by using a noop callback that will not
	 * wake this thread except for the very last PDU.
	 */
	for (i = 0; i < num_rqst; i++) {
		if (!cancelled_mid[i])
			cifs_delete_mid(midQ[i]);
	}

	return rc;
}

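/* Single-request convenience wrapper around compound_send_recv(). */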
int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
	       struct TCP_Server_Info *server,
	       struct smb_rqst *rqst, int *resp_buf_type, const int flags,
	       struct kvec *resp_iov)
{
	return compound_send_recv(xid, ses, server, flags, 1,
				  rqst, resp_buf_type, resp_iov);
}

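/*
 * Legacy entry point: split the caller's buffer into a separate RFC1001
 * length iov plus body iovs before handing off to cifs_send_recv().
 */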
int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
	     struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
	     const int flags, struct kvec *resp_iov)
{
	struct smb_rqst rqst;
	struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
	int rc;

	if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
		new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
					GFP_KERNEL);
		if (!new_iov) {
			/* otherwise cifs_send_recv below sets resp_buf_type */
			*resp_buf_type = CIFS_NO_BUFFER;
			return -ENOMEM;
		}
	} else
		new_iov = s_iov;

	/* 1st iov is a RFC1001 length followed by the rest of the packet */
	memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));

	new_iov[0].iov_base = new_iov[1].iov_base;
	new_iov[0].iov_len = 4;
	new_iov[1].iov_base += 4;
	new_iov[1].iov_len -= 4;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = new_iov;
	rqst.rq_nvec = n_vec + 1;

	rc = cifs_send_recv(xid, ses, ses->server,
			    &rqst, resp_buf_type, flags, resp_iov);
	if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
		kfree(new_iov);
	return rc;
}

1330 int
1331 SendReceive(const unsigned int xid, struct cifs_ses *ses,
1332             struct smb_hdr *in_buf, struct smb_hdr *out_buf,
1333             int *pbytes_returned, const int flags)
1334 {
1335         int rc = 0;
1336         struct mid_q_entry *midQ;
1337         unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
1338         struct kvec iov = { .iov_base = in_buf, .iov_len = len };
1339         struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
1340         struct cifs_credits credits = { .value = 1, .instance = 0 };
1341         struct TCP_Server_Info *server;
1342
1343         if (ses == NULL) {
1344                 cifs_dbg(VFS, "Null smb session\n");
1345                 return -EIO;
1346         }
1347         server = ses->server;
1348         if (server == NULL) {
1349                 cifs_dbg(VFS, "Null tcp session\n");
1350                 return -EIO;
1351         }
1352
1353         if (server->tcpStatus == CifsExiting)
1354                 return -ENOENT;
1355
1356         /* Ensure that we do not send more than 50 overlapping requests
1357            to the same server. We may make this configurable later or
1358            use ses->maxReq */
1359
1360         if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
1361                 cifs_server_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
1362                                 len);
1363                 return -EIO;
1364         }
1365
1366         rc = wait_for_free_request(server, flags, &credits.instance);
1367         if (rc)
1368                 return rc;
1369
1370         /* make sure that we sign in the same order that we send on this socket
1371            and avoid races inside tcp sendmsg code that could cause corruption
1372            of smb data */
1373
1374         mutex_lock(&server->srv_mutex);
1375
1376         rc = allocate_mid(ses, in_buf, &midQ);
1377         if (rc) {
1378                 mutex_unlock(&server->srv_mutex);
1379                 /* Update # of requests on wire to server */
1380                 add_credits(server, &credits, 0);
1381                 return rc;
1382         }
1383
1384         rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
1385         if (rc) {
1386                 mutex_unlock(&server->srv_mutex);
1387                 goto out;
1388         }
1389
1390         midQ->mid_state = MID_REQUEST_SUBMITTED;
1391
1392         cifs_in_send_inc(server);
1393         rc = smb_send(server, in_buf, len);
1394         cifs_in_send_dec(server);
1395         cifs_save_when_sent(midQ);
1396
        if (rc < 0)
                server->sequence_number -= 2;

        mutex_unlock(&server->srv_mutex);

        if (rc < 0)
                goto out;

        rc = wait_for_response(server, midQ);
        if (rc != 0) {
                send_cancel(server, &rqst, midQ);
                spin_lock(&GlobalMid_Lock);
                if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
                        /*
                         * No longer considered to be "in-flight": leave the
                         * mid for the demultiplex thread to free via
                         * DeleteMidQEntry when a late response arrives.
                         */
                        midQ->callback = DeleteMidQEntry;
                        spin_unlock(&GlobalMid_Lock);
                        add_credits(server, &credits, 0);
                        return rc;
                }
                spin_unlock(&GlobalMid_Lock);
        }

        rc = cifs_sync_mid_result(midQ, server);
        if (rc != 0) {
                add_credits(server, &credits, 0);
                return rc;
        }

        if (!midQ->resp_buf || !out_buf ||
            midQ->mid_state != MID_RESPONSE_RECEIVED) {
                rc = -EIO;
                cifs_server_dbg(VFS, "Bad MID state?\n");
                goto out;
        }

        *pbytes_returned = get_rfc1002_length(midQ->resp_buf);
        memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
        rc = cifs_check_receive(midQ, server, 0);
out:
        cifs_delete_mid(midQ);
        add_credits(server, &credits, 0);

        return rc;
}

/*
 * We send a LOCKINGX_CANCEL_LOCK to cause the Windows
 * blocking lock to return.
 */
static int
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
                        struct smb_hdr *in_buf,
                        struct smb_hdr *out_buf)
{
        int bytes_returned;
        struct cifs_ses *ses = tcon->ses;
        LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;

        /*
         * We just modify the current in_buf to change the type of lock
         * from LOCKING_ANDX_SHARED_LOCK or LOCKING_ANDX_EXCLUSIVE_LOCK
         * to LOCKING_ANDX_CANCEL_LOCK.
         */

        pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
        pSMB->Timeout = 0;
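        /* The cancel goes out as a new request, so it needs a fresh mid. */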
        pSMB->hdr.Mid = get_next_mid(ses->server);

        return SendReceive(xid, ses, in_buf, out_buf,
                        &bytes_returned, 0);
}

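/*
 * Like SendReceive(), but for blocking lock requests: the wait for the
 * response may be interrupted by a signal, in which case the pending
 * lock is cancelled on the server before we return to the caller.
 */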
int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
            struct smb_hdr *in_buf, struct smb_hdr *out_buf,
            int *pbytes_returned)
{
        int rc = 0;
        int rstart = 0;
        struct mid_q_entry *midQ;
        struct cifs_ses *ses;
        unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
        struct kvec iov = { .iov_base = in_buf, .iov_len = len };
        struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
        unsigned int instance;
        struct TCP_Server_Info *server;

        if (tcon == NULL || tcon->ses == NULL) {
                cifs_dbg(VFS, "Null smb session\n");
                return -EIO;
        }
        ses = tcon->ses;
        server = ses->server;

        if (server == NULL) {
                cifs_dbg(VFS, "Null tcp session\n");
                return -EIO;
        }

        if (server->tcpStatus == CifsExiting)
                return -ENOENT;

        /*
         * Ensure that we do not send more than 50 overlapping requests
         * to the same server. We may make this configurable later or
         * use ses->maxReq.
         */

        if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
                cifs_tcon_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
                              len);
                return -EIO;
        }

        rc = wait_for_free_request(server, CIFS_BLOCKING_OP, &instance);
        if (rc)
                return rc;

        /*
         * Make sure that we sign in the same order that we send on this
         * socket and avoid races inside tcp sendmsg code that could cause
         * corruption of smb data.
         */

        mutex_lock(&server->srv_mutex);

        rc = allocate_mid(ses, in_buf, &midQ);
        if (rc) {
                mutex_unlock(&server->srv_mutex);
                return rc;
        }

        rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
        if (rc) {
                cifs_delete_mid(midQ);
                mutex_unlock(&server->srv_mutex);
                return rc;
        }

        midQ->mid_state = MID_REQUEST_SUBMITTED;
        cifs_in_send_inc(server);
        rc = smb_send(server, in_buf, len);
        cifs_in_send_dec(server);
        cifs_save_when_sent(midQ);

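        /*
         * If the send failed, back out the two signing sequence numbers
         * reserved for this exchange.
         */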
        if (rc < 0)
                server->sequence_number -= 2;

        mutex_unlock(&server->srv_mutex);

        if (rc < 0) {
                cifs_delete_mid(midQ);
                return rc;
        }

        /* Wait for a reply - allow signals to interrupt. */
        rc = wait_event_interruptible(server->response_q,
                (!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
                ((server->tcpStatus != CifsGood) &&
                 (server->tcpStatus != CifsNew)));

        /* Were we interrupted by a signal? */
        if ((rc == -ERESTARTSYS) &&
                (midQ->mid_state == MID_REQUEST_SUBMITTED) &&
                ((server->tcpStatus == CifsGood) ||
                 (server->tcpStatus == CifsNew))) {

                if (in_buf->Command == SMB_COM_TRANSACTION2) {
                        /*
                         * POSIX lock. We send an NT_CANCEL SMB to cause the
                         * blocking lock to return.
                         */
                        rc = send_cancel(server, &rqst, midQ);
                        if (rc) {
                                cifs_delete_mid(midQ);
                                return rc;
                        }
                } else {
                        /*
                         * Windows lock. We send a LOCKINGX_CANCEL_LOCK
                         * to cause the blocking lock to return.
                         */

                        rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

                        /*
                         * If we get -ENOLCK back the lock may have
                         * already been removed. Don't exit in this case.
                         */
                        if (rc && rc != -ENOLCK) {
                                cifs_delete_mid(midQ);
                                return rc;
                        }
                }

                rc = wait_for_response(server, midQ);
                if (rc) {
                        send_cancel(server, &rqst, midQ);
                        spin_lock(&GlobalMid_Lock);
                        if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
                                /*
                                 * No longer considered to be "in-flight":
                                 * leave the mid for the demultiplex thread
                                 * to free via DeleteMidQEntry when a late
                                 * response arrives.
                                 */
                                midQ->callback = DeleteMidQEntry;
                                spin_unlock(&GlobalMid_Lock);
                                return rc;
                        }
                        spin_unlock(&GlobalMid_Lock);
                }

                /* We got the response - restart system call. */
                rstart = 1;
        }

        rc = cifs_sync_mid_result(midQ, server);
        if (rc != 0)
                return rc;

        /* rcvd frame is ok */
        if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
                rc = -EIO;
                cifs_tcon_dbg(VFS, "Bad MID state?\n");
                goto out;
        }

        *pbytes_returned = get_rfc1002_length(midQ->resp_buf);
        memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
        rc = cifs_check_receive(midQ, server, 0);
out:
        cifs_delete_mid(midQ);
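        /*
         * If we cancelled a blocking lock after a signal, the server's
         * -EACCES most likely reflects our own cancel rather than a real
         * failure, so let the VFS restart the system call.
         */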
        if (rstart && rc == -EACCES)
                return -ERESTARTSYS;
        return rc;
}