cifs: do all necessary checks for credits within or before locking

fs/smb/client/transport.c (platform/kernel/linux-starfive.git)

// SPDX-License-Identifier: LGPL-2.1
/*
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2008
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *   Jeremy Allison (jra@samba.org) 2006.
 *
 */

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/wait.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/tcp.h>
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>
#include <asm/processor.h>
#include <linux/mempool.h>
#include <linux/sched/signal.h>
#include <linux/task_io_accounting_ops.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smb2proto.h"
#include "smbdirect.h"

/* Max number of iovectors we can use off the stack when sending requests. */
#define CIFS_MAX_IOV_SIZE 8

void
cifs_wake_up_task(struct mid_q_entry *mid)
{
        wake_up_process(mid->callback_data);
}

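/*
 * Allocate a mid (multiplexed request id) entry from the mempool and set it
 * up for a synchronous request: the default callback simply wakes up the
 * allocating task, which then sleeps in wait_for_response().
 */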
static struct mid_q_entry *
alloc_mid(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
        struct mid_q_entry *temp;

        if (server == NULL) {
                cifs_dbg(VFS, "%s: null TCP session\n", __func__);
                return NULL;
        }

        temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
        memset(temp, 0, sizeof(struct mid_q_entry));
        kref_init(&temp->refcount);
        temp->mid = get_mid(smb_buffer);
        temp->pid = current->pid;
        temp->command = cpu_to_le16(smb_buffer->Command);
        cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
        /*      do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
        /* when mid allocated can be before when sent */
        temp->when_alloc = jiffies;
        temp->server = server;

        /*
         * The default is for the mid to be synchronous, so the
         * default callback just wakes up the current task.
         */
        get_task_struct(current);
        temp->creator = current;
        temp->callback = cifs_wake_up_task;
        temp->callback_data = current;

        atomic_inc(&mid_count);
        temp->mid_state = MID_REQUEST_ALLOCATED;
        return temp;
}

static void __release_mid(struct kref *refcount)
{
        struct mid_q_entry *midEntry =
                        container_of(refcount, struct mid_q_entry, refcount);
#ifdef CONFIG_CIFS_STATS2
        __le16 command = midEntry->server->vals->lock_cmd;
        __u16 smb_cmd = le16_to_cpu(midEntry->command);
        unsigned long now;
        unsigned long roundtrip_time;
#endif
        struct TCP_Server_Info *server = midEntry->server;

        if (midEntry->resp_buf && (midEntry->mid_flags & MID_WAIT_CANCELLED) &&
            midEntry->mid_state == MID_RESPONSE_RECEIVED &&
            server->ops->handle_cancelled_mid)
                server->ops->handle_cancelled_mid(midEntry, server);

        midEntry->mid_state = MID_FREE;
        atomic_dec(&mid_count);
        if (midEntry->large_buf)
                cifs_buf_release(midEntry->resp_buf);
        else
                cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
        now = jiffies;
        if (now < midEntry->when_alloc)
                cifs_server_dbg(VFS, "Invalid mid allocation time\n");
        roundtrip_time = now - midEntry->when_alloc;

        if (smb_cmd < NUMBER_OF_SMB2_COMMANDS) {
                if (atomic_read(&server->num_cmds[smb_cmd]) == 0) {
                        server->slowest_cmd[smb_cmd] = roundtrip_time;
                        server->fastest_cmd[smb_cmd] = roundtrip_time;
                } else {
                        if (server->slowest_cmd[smb_cmd] < roundtrip_time)
                                server->slowest_cmd[smb_cmd] = roundtrip_time;
                        else if (server->fastest_cmd[smb_cmd] > roundtrip_time)
                                server->fastest_cmd[smb_cmd] = roundtrip_time;
                }
                cifs_stats_inc(&server->num_cmds[smb_cmd]);
                server->time_per_cmd[smb_cmd] += roundtrip_time;
        }
        /*
         * Commands taking longer than one second (the default threshold) can
         * indicate that something is wrong, unless the link is quite slow or
         * the server very busy. This calculation is unlikely to wrap as long
         * as slow_rsp_threshold stays at or below its recommended maximum
         * (32767, i.e. 9 hours), and is harmless even if wrong since it only
         * affects debug counters - so leave it as a simple comparison rather
         * than doing multiple conversions and overflow checks.
         */
        if ((slow_rsp_threshold != 0) &&
            time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
            (midEntry->command != command)) {
                /*
                 * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
                 * NB: le16_to_cpu returns unsigned so can not be negative below
                 */
                if (smb_cmd < NUMBER_OF_SMB2_COMMANDS)
                        cifs_stats_inc(&server->smb2slowcmd[smb_cmd]);

                trace_smb3_slow_rsp(smb_cmd, midEntry->mid, midEntry->pid,
                               midEntry->when_sent, midEntry->when_received);
                if (cifsFYI & CIFS_TIMER) {
                        pr_debug("slow rsp: cmd %d mid %llu",
                                 midEntry->command, midEntry->mid);
                        cifs_info("A: 0x%lx S: 0x%lx R: 0x%lx\n",
                                  now - midEntry->when_alloc,
                                  now - midEntry->when_sent,
                                  now - midEntry->when_received);
                }
        }
#endif
        put_task_struct(midEntry->creator);

        mempool_free(midEntry, cifs_mid_poolp);
}

void release_mid(struct mid_q_entry *mid)
{
        struct TCP_Server_Info *server = mid->server;

        spin_lock(&server->mid_lock);
        kref_put(&mid->refcount, __release_mid);
        spin_unlock(&server->mid_lock);
}

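/*
 * Unlink the mid from the pending queue (if it is still queued) and drop
 * the reference taken at allocation time. The MID_DELETED flag guards
 * against a double list_del_init() when racing with the demultiplex thread.
 */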
void
delete_mid(struct mid_q_entry *mid)
{
        spin_lock(&mid->server->mid_lock);
        if (!(mid->mid_flags & MID_DELETED)) {
                list_del_init(&mid->qhead);
                mid->mid_flags |= MID_DELETED;
        }
        spin_unlock(&mid->server->mid_lock);

        release_mid(mid);
}

/*
 * smb_send_kvec - send an array of kvecs to the server
 * @server:     Server to send the data to
 * @smb_msg:    Message to send
 * @sent:       amount of data sent on socket is stored here
 *
 * Our basic "send data to server" function. Should be called with srv_mutex
 * held. The caller is responsible for handling the results.
 */
static int
smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
              size_t *sent)
{
        int rc = 0;
        int retries = 0;
        struct socket *ssocket = server->ssocket;

        *sent = 0;

        if (server->noblocksnd)
                smb_msg->msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
        else
                smb_msg->msg_flags = MSG_NOSIGNAL;

        while (msg_data_left(smb_msg)) {
                /*
                 * If blocking send, we try 3 times, since each can block
                 * for 5 seconds. For nonblocking we have to try more
                 * but wait increasing amounts of time allowing time for
                 * socket to clear.  The overall time we wait in either
                 * case to send on the socket is about 15 seconds.
                 * Similarly we wait for 15 seconds for a response from
                 * the server in SendReceive[2] for the server to send
                 * a response back for most types of requests (except
                 * SMB Write past end of file which can be slow, and
                 * blocking lock operations). NFS waits slightly longer
                 * than CIFS, but this can make it take longer for
                 * nonresponsive servers to be detected and 15 seconds
                 * is more than enough time for modern networks to
                 * send a packet.  In most cases if we fail to send
                 * after the retries we will kill the socket and
                 * reconnect which may clear the network problem.
                 */
                rc = sock_sendmsg(ssocket, smb_msg);
                if (rc == -EAGAIN) {
                        retries++;
                        if (retries >= 14 ||
                            (!server->noblocksnd && (retries > 2))) {
                                cifs_server_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
                                         ssocket);
                                return -EAGAIN;
                        }
                        msleep(1 << retries);
                        continue;
                }

                if (rc < 0)
                        return rc;

                if (rc == 0) {
                        /* should never happen, letting socket clear before
                           retrying is our only obvious option here */
                        cifs_server_dbg(VFS, "tcp sent no data\n");
                        msleep(500);
                        continue;
                }

                /* send was at least partially successful */
                *sent += rc;
                retries = 0; /* in case we get ENOSPC on the next send */
        }
        return 0;
}

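/*
 * smb_rqst_len - total bytes a request will occupy on the wire
 * @server:     Server the request will be sent to
 * @rqst:       Request to measure
 *
 * Sums the kvec array (skipping the 4-byte RFC1002 length vector for SMB2+)
 * plus any attached page array.
 */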
unsigned long
smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
        unsigned int i;
        struct kvec *iov;
        int nvec;
        unsigned long buflen = 0;

        if (!is_smb1(server) && rqst->rq_nvec >= 2 &&
            rqst->rq_iov[0].iov_len == 4) {
                iov = &rqst->rq_iov[1];
                nvec = rqst->rq_nvec - 1;
        } else {
                iov = rqst->rq_iov;
                nvec = rqst->rq_nvec;
        }

        /* total up iov array first */
        for (i = 0; i < nvec; i++)
                buflen += iov[i].iov_len;

        /*
         * Add in the page array if there is one. The caller needs to make
         * sure rq_offset and rq_tailsz are set correctly. If a buffer of
         * multiple pages ends at page boundary, rq_tailsz needs to be set to
         * PAGE_SIZE.
         */
        if (rqst->rq_npages) {
                if (rqst->rq_npages == 1)
                        buflen += rqst->rq_tailsz;
                else {
                        /*
                         * If there is more than one page, calculate the
                         * buffer length based on rq_offset and rq_tailsz
                         */
                        buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
                                        rqst->rq_offset;
                        buflen += rqst->rq_tailsz;
                }
        }

        return buflen;
}

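/*
 * Send one or more requests on the socket, or over smbdirect when RDMA is
 * enabled. Signals are blocked for the duration of the send so that a
 * partial send does not needlessly force a session reconnect. Like
 * smb_send_kvec(), this should be called with the server mutex held.
 */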
static int
__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
                struct smb_rqst *rqst)
{
        int rc;
        struct kvec *iov;
        int n_vec;
        unsigned int send_length = 0;
        unsigned int i, j;
        sigset_t mask, oldmask;
        size_t total_len = 0, sent, size;
        struct socket *ssocket = server->ssocket;
        struct msghdr smb_msg = {};
        __be32 rfc1002_marker;

        cifs_in_send_inc(server);
        if (cifs_rdma_enabled(server)) {
                /* return -EAGAIN when connecting or reconnecting */
                rc = -EAGAIN;
                if (server->smbd_conn)
                        rc = smbd_send(server, num_rqst, rqst);
                goto smbd_done;
        }

        rc = -EAGAIN;
        if (ssocket == NULL)
                goto out;

        rc = -ERESTARTSYS;
        if (fatal_signal_pending(current)) {
                cifs_dbg(FYI, "signal pending before send request\n");
                goto out;
        }

        rc = 0;
        /* cork the socket */
        tcp_sock_set_cork(ssocket->sk, true);

        for (j = 0; j < num_rqst; j++)
                send_length += smb_rqst_len(server, &rqst[j]);
        rfc1002_marker = cpu_to_be32(send_length);

        /*
         * We should not allow signals to interrupt the network send because
         * any partial send will cause session reconnects, increasing the
         * latency of system calls and overloading the server with
         * unnecessary requests.
         */

        sigfillset(&mask);
        sigprocmask(SIG_BLOCK, &mask, &oldmask);

        /* Generate a rfc1002 marker for SMB2+ */
        if (!is_smb1(server)) {
                struct kvec hiov = {
                        .iov_base = &rfc1002_marker,
                        .iov_len  = 4
                };
                iov_iter_kvec(&smb_msg.msg_iter, ITER_SOURCE, &hiov, 1, 4);
                rc = smb_send_kvec(server, &smb_msg, &sent);
                if (rc < 0)
                        goto unmask;

                total_len += sent;
                send_length += 4;
        }

        cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);

        for (j = 0; j < num_rqst; j++) {
                iov = rqst[j].rq_iov;
                n_vec = rqst[j].rq_nvec;

                size = 0;
                for (i = 0; i < n_vec; i++) {
                        dump_smb(iov[i].iov_base, iov[i].iov_len);
                        size += iov[i].iov_len;
                }

                iov_iter_kvec(&smb_msg.msg_iter, ITER_SOURCE, iov, n_vec, size);

                rc = smb_send_kvec(server, &smb_msg, &sent);
                if (rc < 0)
                        goto unmask;

                total_len += sent;

                /* now walk the page array and send each page in it */
                for (i = 0; i < rqst[j].rq_npages; i++) {
                        struct bio_vec bvec;

                        bvec.bv_page = rqst[j].rq_pages[i];
                        rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
                                             &bvec.bv_offset);

                        iov_iter_bvec(&smb_msg.msg_iter, ITER_SOURCE,
                                      &bvec, 1, bvec.bv_len);
                        rc = smb_send_kvec(server, &smb_msg, &sent);
                        if (rc < 0)
                                break;

                        total_len += sent;
                }
        }

unmask:
        sigprocmask(SIG_SETMASK, &oldmask, NULL);

        /*
         * If a signal is pending but we have already sent the whole packet to
         * the server, we need to return success so that a corresponding mid
         * entry is kept in the pending requests queue, allowing the client to
         * handle the response from the server.
         *
         * If only part of the packet has been sent, there is no need to hide
         * the interrupt because the session will be reconnected anyway, so
         * there won't be any response from the server to handle.
         */

        if (signal_pending(current) && (total_len != send_length)) {
                cifs_dbg(FYI, "signal is pending after attempt to send\n");
                rc = -ERESTARTSYS;
        }

        /* uncork it */
        tcp_sock_set_cork(ssocket->sk, false);

        if ((total_len > 0) && (total_len != send_length)) {
                cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
                         send_length, total_len);
                /*
                 * If we have only sent part of an SMB then the next SMB could
                 * be taken as the remainder of this one. We need to kill the
                 * socket so the server throws away the partial SMB
                 */
                cifs_signal_cifsd_for_reconnect(server, false);
                trace_smb3_partial_send_reconnect(server->CurrentMid,
                                                  server->conn_id, server->hostname);
        }
smbd_done:
        if (rc < 0 && rc != -EINTR)
                cifs_server_dbg(VFS, "Error %d sending data on socket to server\n",
                         rc);
        else if (rc > 0)
                rc = 0;
out:
        cifs_in_send_dec(server);
        return rc;
}

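/*
 * Send a request, first wrapping it in a transform (encryption) header when
 * CIFS_TRANSFORM_REQ is set. The extra iovec needed for the transform
 * header is why at most MAX_COMPOUND - 1 requests can be passed in here.
 */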
static int
smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
              struct smb_rqst *rqst, int flags)
{
        struct kvec iov;
        struct smb2_transform_hdr *tr_hdr;
        struct smb_rqst cur_rqst[MAX_COMPOUND];
        int rc;

        if (!(flags & CIFS_TRANSFORM_REQ))
                return __smb_send_rqst(server, num_rqst, rqst);

        if (num_rqst > MAX_COMPOUND - 1)
                return -ENOMEM;

        if (!server->ops->init_transform_rq) {
                cifs_server_dbg(VFS, "Encryption requested but transform callback is missing\n");
                return -EIO;
        }

        tr_hdr = kzalloc(sizeof(*tr_hdr), GFP_NOFS);
        if (!tr_hdr)
                return -ENOMEM;

        memset(&cur_rqst[0], 0, sizeof(cur_rqst));
        memset(&iov, 0, sizeof(iov));

        iov.iov_base = tr_hdr;
        iov.iov_len = sizeof(*tr_hdr);
        cur_rqst[0].rq_iov = &iov;
        cur_rqst[0].rq_nvec = 1;

        rc = server->ops->init_transform_rq(server, num_rqst + 1,
                                            &cur_rqst[0], rqst);
        if (rc)
                goto out;

        rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
        smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
out:
        kfree(tr_hdr);
        return rc;
}

int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
         unsigned int smb_buf_length)
{
        struct kvec iov[2];
        struct smb_rqst rqst = { .rq_iov = iov,
                                 .rq_nvec = 2 };

        iov[0].iov_base = smb_buffer;
        iov[0].iov_len = 4;
        iov[1].iov_base = (char *)smb_buffer + 4;
        iov[1].iov_len = smb_buf_length;

        return __smb_send_rqst(server, 1, &rqst);
}

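/*
 * wait_for_free_credits - block until enough send credits are available
 * @server:     Server to send the request to
 * @num_credits:        Number of credits the caller needs
 * @timeout:    Timeout in ms, or a negative value to wait indefinitely
 * @flags:      CIFS_*_OP flags describing the type of request
 * @instance:   Set to the reconnect instance the credits were taken from
 *
 * Echoes fail immediately when no credits are left, oplock break responses
 * (CIFS_NON_BLOCKING) are never held up, and ordinary single-credit
 * requests leave the last MAX_COMPOUND credits free for compound requests.
 */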
static int
wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
                      const int timeout, const int flags,
                      unsigned int *instance)
{
        long rc;
        int *credits;
        int optype;
        long int t;
        int scredits, in_flight;

        if (timeout < 0)
                t = MAX_JIFFY_OFFSET;
        else
                t = msecs_to_jiffies(timeout);

        optype = flags & CIFS_OP_MASK;

        *instance = 0;

        credits = server->ops->get_credits_field(server, optype);
        /* Since an echo is already inflight, no need to wait to send another */
        if (*credits <= 0 && optype == CIFS_ECHO_OP)
                return -EAGAIN;

        spin_lock(&server->req_lock);
        if ((flags & CIFS_TIMEOUT_MASK) == CIFS_NON_BLOCKING) {
                /* oplock breaks must not be held up */
                server->in_flight++;
                if (server->in_flight > server->max_in_flight)
                        server->max_in_flight = server->in_flight;
                *credits -= 1;
                *instance = server->reconnect_instance;
                scredits = *credits;
                in_flight = server->in_flight;
                spin_unlock(&server->req_lock);

                trace_smb3_nblk_credits(server->CurrentMid,
                                server->conn_id, server->hostname, scredits, -1, in_flight);
                cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
                                __func__, 1, scredits);

                return 0;
        }

        while (1) {
                spin_unlock(&server->req_lock);

                spin_lock(&server->srv_lock);
                if (server->tcpStatus == CifsExiting) {
                        spin_unlock(&server->srv_lock);
                        return -ENOENT;
                }
                spin_unlock(&server->srv_lock);

                spin_lock(&server->req_lock);
                if (*credits < num_credits) {
                        scredits = *credits;
                        spin_unlock(&server->req_lock);

                        cifs_num_waiters_inc(server);
                        rc = wait_event_killable_timeout(server->request_q,
                                has_credits(server, credits, num_credits), t);
                        cifs_num_waiters_dec(server);
                        if (!rc) {
                                spin_lock(&server->req_lock);
                                scredits = *credits;
                                in_flight = server->in_flight;
                                spin_unlock(&server->req_lock);

                                trace_smb3_credit_timeout(server->CurrentMid,
                                                server->conn_id, server->hostname, scredits,
                                                num_credits, in_flight);
                                cifs_server_dbg(VFS, "wait timed out after %d ms\n",
                                                timeout);
                                return -EBUSY;
                        }
                        if (rc == -ERESTARTSYS)
                                return -ERESTARTSYS;
                        spin_lock(&server->req_lock);
                } else {
                        /*
                         * For normal commands, reserve the last MAX_COMPOUND
                         * credits to compound requests.
                         * Otherwise these compounds could be permanently
                         * starved for credits by single-credit requests.
                         *
                         * To prevent spinning the CPU, block this thread until
                         * there are >MAX_COMPOUND credits available.
                         * But only do this if we already have a lot of
                         * credits in flight, to avoid triggering this check
                         * for servers that are slow to hand out credits on
                         * new sessions.
                         */
                        if (!optype && num_credits == 1 &&
                            server->in_flight > 2 * MAX_COMPOUND &&
                            *credits <= MAX_COMPOUND) {
                                spin_unlock(&server->req_lock);

                                cifs_num_waiters_inc(server);
                                rc = wait_event_killable_timeout(
                                        server->request_q,
                                        has_credits(server, credits,
                                                    MAX_COMPOUND + 1),
                                        t);
                                cifs_num_waiters_dec(server);
                                if (!rc) {
                                        spin_lock(&server->req_lock);
                                        scredits = *credits;
                                        in_flight = server->in_flight;
                                        spin_unlock(&server->req_lock);

                                        trace_smb3_credit_timeout(
                                                        server->CurrentMid,
                                                        server->conn_id, server->hostname,
                                                        scredits, num_credits, in_flight);
                                        cifs_server_dbg(VFS, "wait timed out after %d ms\n",
                                                        timeout);
                                        return -EBUSY;
                                }
                                if (rc == -ERESTARTSYS)
                                        return -ERESTARTSYS;
                                spin_lock(&server->req_lock);
                                continue;
                        }

                        /*
                         * Can not count locking commands against total
                         * as they are allowed to block on server.
                         */

                        /* update # of requests on the wire to server */
                        if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
                                *credits -= num_credits;
                                server->in_flight += num_credits;
                                if (server->in_flight > server->max_in_flight)
                                        server->max_in_flight = server->in_flight;
                                *instance = server->reconnect_instance;
                        }
                        scredits = *credits;
                        in_flight = server->in_flight;
                        spin_unlock(&server->req_lock);

                        trace_smb3_waitff_credits(server->CurrentMid,
                                        server->conn_id, server->hostname, scredits,
                                        -(num_credits), in_flight);
                        cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
                                        __func__, num_credits, scredits);
                        break;
                }
        }
        return 0;
}

static int
wait_for_free_request(struct TCP_Server_Info *server, const int flags,
                      unsigned int *instance)
{
        return wait_for_free_credits(server, 1, -1, flags,
                                     instance);
}

static int
wait_for_compound_request(struct TCP_Server_Info *server, int num,
                          const int flags, unsigned int *instance)
{
        int *credits;
        int scredits, in_flight;

        credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK);

        spin_lock(&server->req_lock);
        scredits = *credits;
        in_flight = server->in_flight;

        if (*credits < num) {
                /*
                 * If the server is tight on resources or just gives us less
                 * credits for other reasons (e.g. requests are coming out of
                 * order and the server delays granting more credits until it
                 * processes a missing mid) and we exhausted most available
                 * credits there may be situations when we try to send
                 * a compound request but we don't have enough credits. At this
                 * point the client needs to decide if it should wait for
                 * additional credits or fail the request. If at least one
                 * request is in flight there is a high probability that the
                 * server will return enough credits to satisfy this compound
                 * request.
                 *
                 * Return immediately if no requests in flight since we will be
                 * stuck on waiting for credits.
                 */
                if (server->in_flight == 0) {
                        spin_unlock(&server->req_lock);
                        trace_smb3_insufficient_credits(server->CurrentMid,
                                        server->conn_id, server->hostname, scredits,
                                        num, in_flight);
                        cifs_dbg(FYI, "%s: %d requests in flight, needed %d total=%d\n",
                                        __func__, in_flight, num, scredits);
                        return -EDEADLK;
                }
        }
        spin_unlock(&server->req_lock);

        return wait_for_free_credits(server, num, 60000, flags,
                                     instance);
}

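/*
 * Default implementation of the wait_mtu_credits server operation (the
 * SMB2+ code overrides this): large reads and writes are not throttled by
 * credits here, so grant the full size without consuming anything from the
 * server's credit count.
 */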
int
cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
                      unsigned int *num, struct cifs_credits *credits)
{
        *num = size;
        credits->value = 0;
        credits->instance = server->reconnect_instance;
        return 0;
}

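/*
 * Refuse to allocate a mid while the session is only partially set up
 * (unless this is the session setup itself) or is being torn down (unless
 * this is the logoff), then queue the new mid on pending_mid_q.
 */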
static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
                        struct mid_q_entry **ppmidQ)
{
        spin_lock(&ses->ses_lock);
        if (ses->ses_status == SES_NEW) {
                if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
                        (in_buf->Command != SMB_COM_NEGOTIATE)) {
                        spin_unlock(&ses->ses_lock);
                        return -EAGAIN;
                }
                /* else ok - we are setting up session */
        }

        if (ses->ses_status == SES_EXITING) {
                /* check if SMB session is bad because we are setting it up */
                if (in_buf->Command != SMB_COM_LOGOFF_ANDX) {
                        spin_unlock(&ses->ses_lock);
                        return -EAGAIN;
                }
                /* else ok - we are shutting down session */
        }
        spin_unlock(&ses->ses_lock);

        *ppmidQ = alloc_mid(in_buf, ses->server);
        if (*ppmidQ == NULL)
                return -ENOMEM;
        spin_lock(&ses->server->mid_lock);
        list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
        spin_unlock(&ses->server->mid_lock);
        return 0;
}

static int
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
        int error;

        error = wait_event_state(server->response_q,
                                 midQ->mid_state != MID_REQUEST_SUBMITTED,
                                 (TASK_KILLABLE|TASK_FREEZABLE_UNSAFE));
        if (error < 0)
                return -ERESTARTSYS;

        return 0;
}

struct mid_q_entry *
cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
        int rc;
        struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
        struct mid_q_entry *mid;

        if (rqst->rq_iov[0].iov_len != 4 ||
            rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
                return ERR_PTR(-EIO);

        /* enable signing if server requires it */
        if (server->sign)
                hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

        mid = alloc_mid(hdr, server);
        if (mid == NULL)
                return ERR_PTR(-ENOMEM);

        rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
        if (rc) {
                release_mid(mid);
                return ERR_PTR(rc);
        }

        return mid;
}

/*
 * Send an SMB request and set the callback function in the mid to handle
 * the result. Caller is responsible for dealing with timeouts.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
                mid_receive_t *receive, mid_callback_t *callback,
                mid_handle_t *handle, void *cbdata, const int flags,
                const struct cifs_credits *exist_credits)
{
        int rc;
        struct mid_q_entry *mid;
        struct cifs_credits credits = { .value = 0, .instance = 0 };
        unsigned int instance;
        int optype;

        optype = flags & CIFS_OP_MASK;

        if ((flags & CIFS_HAS_CREDITS) == 0) {
                rc = wait_for_free_request(server, flags, &instance);
                if (rc)
                        return rc;
                credits.value = 1;
                credits.instance = instance;
        } else
                instance = exist_credits->instance;

        cifs_server_lock(server);

        /*
         * We can't use credits obtained from the previous session to send this
         * request. Check if there were reconnects after we obtained credits and
         * return -EAGAIN in such cases to let callers handle it.
         */
        if (instance != server->reconnect_instance) {
                cifs_server_unlock(server);
                add_credits_and_wake_if(server, &credits, optype);
                return -EAGAIN;
        }

        mid = server->ops->setup_async_request(server, rqst);
        if (IS_ERR(mid)) {
                cifs_server_unlock(server);
                add_credits_and_wake_if(server, &credits, optype);
                return PTR_ERR(mid);
        }

        mid->receive = receive;
        mid->callback = callback;
        mid->callback_data = cbdata;
        mid->handle = handle;
        mid->mid_state = MID_REQUEST_SUBMITTED;

        /* put it on the pending_mid_q */
        spin_lock(&server->mid_lock);
        list_add_tail(&mid->qhead, &server->pending_mid_q);
        spin_unlock(&server->mid_lock);

        /*
         * Need to store the time in mid before calling I/O. For call_async,
         * I/O response may come back and free the mid entry on another thread.
         */
        cifs_save_when_sent(mid);
        rc = smb_send_rqst(server, 1, rqst, flags);

        if (rc < 0) {
                revert_current_mid(server, mid->credits);
                server->sequence_number -= 2;
                delete_mid(mid);
        }

        cifs_server_unlock(server);

        if (rc == 0)
                return 0;

        add_credits_and_wake_if(server, &credits, optype);
        return rc;
}

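/*
 * A minimal sketch of an asynchronous caller, for illustration only: the
 * names my_callback and my_ctx below are hypothetical, not part of this
 * file. Real users (e.g. the echo and async read/write paths) follow the
 * same shape: build the request, hand it to cifs_call_async(), then finish
 * the work in the callback, which runs from the demultiplex thread and is
 * responsible for returning credits and releasing the mid:
 *
 *      static void my_callback(struct mid_q_entry *mid)
 *      {
 *              struct my_ctx *ctx = mid->callback_data;
 *
 *              ctx->mid_state = mid->mid_state;        (inspect the result)
 *              queue_work(cifsiod_wq, &ctx->work);     (defer heavy work)
 *              release_mid(mid);
 *      }
 *
 *      rc = cifs_call_async(server, &rqst, NULL, my_callback, NULL,
 *                           ctx, CIFS_ECHO_OP, NULL);
 *      if (rc)
 *              return rc;      (credits were already returned on failure)
 */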
/*
 * Send an SMB Request.  No response info (other than return code)
 * needs to be parsed.
 *
 * flags indicate the type of request buffer and how long to wait
 * and whether to log NT STATUS code (error) before mapping it to POSIX error
 */
int
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
                 char *in_buf, int flags)
{
        int rc;
        struct kvec iov[1];
        struct kvec rsp_iov;
        int resp_buf_type;

        iov[0].iov_base = in_buf;
        iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
        flags |= CIFS_NO_RSP_BUF;
        rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
        cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);

        return rc;
}

static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
        int rc = 0;

        cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
                 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);

        spin_lock(&server->mid_lock);
        switch (mid->mid_state) {
        case MID_RESPONSE_RECEIVED:
                spin_unlock(&server->mid_lock);
                return rc;
        case MID_RETRY_NEEDED:
                rc = -EAGAIN;
                break;
        case MID_RESPONSE_MALFORMED:
                rc = -EIO;
                break;
        case MID_SHUTDOWN:
                rc = -EHOSTDOWN;
                break;
        default:
                if (!(mid->mid_flags & MID_DELETED)) {
                        list_del_init(&mid->qhead);
                        mid->mid_flags |= MID_DELETED;
                }
                cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
                         __func__, mid->mid, mid->mid_state);
                rc = -EIO;
        }
        spin_unlock(&server->mid_lock);

        release_mid(mid);
        return rc;
}

static inline int
send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
            struct mid_q_entry *mid)
{
        return server->ops->send_cancel ?
                                server->ops->send_cancel(server, rqst, mid) : 0;
}

int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
                   bool log_error)
{
        unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;

        dump_smb(mid->resp_buf, min_t(u32, 92, len));

        /* convert the length into a more usable form */
        if (server->sign) {
                struct kvec iov[2];
                int rc = 0;
                struct smb_rqst rqst = { .rq_iov = iov,
                                         .rq_nvec = 2 };

                iov[0].iov_base = mid->resp_buf;
                iov[0].iov_len = 4;
                iov[1].iov_base = (char *)mid->resp_buf + 4;
                iov[1].iov_len = len - 4;
                /* FIXME: add code to kill session */
                rc = cifs_verify_signature(&rqst, server,
                                           mid->sequence_number);
                if (rc)
                        cifs_server_dbg(VFS, "SMB signature verification returned error = %d\n",
                                 rc);
        }

        /* BB special case reconnect tid and uid here? */
        return map_and_check_smb_error(mid, log_error);
}

struct mid_q_entry *
cifs_setup_request(struct cifs_ses *ses, struct TCP_Server_Info *ignored,
                   struct smb_rqst *rqst)
{
        int rc;
        struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
        struct mid_q_entry *mid;

        if (rqst->rq_iov[0].iov_len != 4 ||
            rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
                return ERR_PTR(-EIO);

        rc = allocate_mid(ses, hdr, &mid);
        if (rc)
                return ERR_PTR(rc);
        rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
        if (rc) {
                delete_mid(mid);
                return ERR_PTR(rc);
        }
        return mid;
}

static void
cifs_compound_callback(struct mid_q_entry *mid)
{
        struct TCP_Server_Info *server = mid->server;
        struct cifs_credits credits;

        credits.value = server->ops->get_credits(mid);
        credits.instance = server->reconnect_instance;

        add_credits(server, &credits, mid->optype);
}

static void
cifs_compound_last_callback(struct mid_q_entry *mid)
{
        cifs_compound_callback(mid);
        cifs_wake_up_task(mid);
}

static void
cifs_cancelled_callback(struct mid_q_entry *mid)
{
        cifs_compound_callback(mid);
        release_mid(mid);
}

/*
 * Return a channel (master if none) of @ses that can be used to send
 * regular requests.
 *
 * If we are currently binding a new channel (negprot/sess.setup),
 * return the new incomplete channel.
 */
struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
{
        uint index = 0;

        if (!ses)
                return NULL;

        /* round robin */
        index = (uint)atomic_inc_return(&ses->chan_seq);

        spin_lock(&ses->chan_lock);
        index %= ses->chan_count;
        spin_unlock(&ses->chan_lock);

        return ses->chans[index].server;
}

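/*
 * compound_send_recv - send up to MAX_COMPOUND requests as one chain and
 * wait for the responses. Credits for the whole chain are obtained up
 * front (wait_for_compound_request), the mids are set up and sent under
 * the server mutex to keep signing and sending ordered, and each response
 * returns its granted credits through the per-mid callbacks above.
 */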
int
compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
                   struct TCP_Server_Info *server,
                   const int flags, const int num_rqst, struct smb_rqst *rqst,
                   int *resp_buf_type, struct kvec *resp_iov)
{
        int i, j, optype, rc = 0;
        struct mid_q_entry *midQ[MAX_COMPOUND];
        bool cancelled_mid[MAX_COMPOUND] = {false};
        struct cifs_credits credits[MAX_COMPOUND] = {
                { .value = 0, .instance = 0 }
        };
        unsigned int instance;
        char *buf;

        optype = flags & CIFS_OP_MASK;

        for (i = 0; i < num_rqst; i++)
                resp_buf_type[i] = CIFS_NO_BUFFER;  /* no response buf yet */

        if (!ses || !ses->server || !server) {
                cifs_dbg(VFS, "Null session\n");
                return -EIO;
        }

        spin_lock(&server->srv_lock);
        if (server->tcpStatus == CifsExiting) {
                spin_unlock(&server->srv_lock);
                return -ENOENT;
        }
        spin_unlock(&server->srv_lock);

        /*
         * Wait for credits to become available for all the requests.
         * This approach can still get stuck waiting for credits if the server
         * doesn't grant credits to the outstanding requests and the client is
         * completely idle, not generating any other requests.
         * The eventual session reconnect handles that case.
         */
        rc = wait_for_compound_request(server, num_rqst, flags,
                                       &instance);
        if (rc)
                return rc;

        for (i = 0; i < num_rqst; i++) {
                credits[i].value = 1;
                credits[i].instance = instance;
        }

        /*
         * Make sure that we sign in the same order that we send on this socket
         * and avoid races inside tcp sendmsg code that could cause corruption
         * of smb data.
         */

        cifs_server_lock(server);

        /*
         * All the parts of the compound chain must use credits obtained from
         * the same session. We cannot use credits obtained from a previous
         * session to send this request. Check if there were reconnects after
         * we obtained credits and return -EAGAIN in such cases to let callers
         * handle it.
         */
        if (instance != server->reconnect_instance) {
                cifs_server_unlock(server);
                for (j = 0; j < num_rqst; j++)
                        add_credits(server, &credits[j], optype);
                return -EAGAIN;
        }

        for (i = 0; i < num_rqst; i++) {
                midQ[i] = server->ops->setup_request(ses, server, &rqst[i]);
                if (IS_ERR(midQ[i])) {
                        revert_current_mid(server, i);
                        for (j = 0; j < i; j++)
                                delete_mid(midQ[j]);
                        cifs_server_unlock(server);

                        /* Update # of requests on wire to server */
                        for (j = 0; j < num_rqst; j++)
                                add_credits(server, &credits[j], optype);
                        return PTR_ERR(midQ[i]);
                }

                midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
                midQ[i]->optype = optype;
                /*
                 * Invoke callback for every part of the compound chain
                 * to calculate credits properly. Wake up this thread only when
                 * the last element is received.
                 */
                if (i < num_rqst - 1)
                        midQ[i]->callback = cifs_compound_callback;
                else
                        midQ[i]->callback = cifs_compound_last_callback;
        }
        rc = smb_send_rqst(server, num_rqst, rqst, flags);

        for (i = 0; i < num_rqst; i++)
                cifs_save_when_sent(midQ[i]);

        if (rc < 0) {
                revert_current_mid(server, num_rqst);
                server->sequence_number -= 2;
        }

        cifs_server_unlock(server);

        /*
         * If sending failed for some reason or it is an oplock break that we
         * will not receive a response to - return credits back
         */
        if (rc < 0 || (flags & CIFS_NO_SRV_RSP)) {
                for (i = 0; i < num_rqst; i++)
                        add_credits(server, &credits[i], optype);
                goto out;
        }

        /*
         * At this point the request is passed to the network stack - we assume
         * that any credits taken from the server structure on the client have
         * been spent and we can't return them back. Once we receive responses
         * we will collect credits granted by the server in the mid callbacks
         * and add those credits to the server structure.
         */

        /*
         * Compounding is never used during session establish.
         */
        spin_lock(&ses->ses_lock);
        if ((ses->ses_status == SES_NEW) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
                spin_unlock(&ses->ses_lock);

                cifs_server_lock(server);
                smb311_update_preauth_hash(ses, server, rqst[0].rq_iov, rqst[0].rq_nvec);
                cifs_server_unlock(server);

                spin_lock(&ses->ses_lock);
        }
        spin_unlock(&ses->ses_lock);

        for (i = 0; i < num_rqst; i++) {
                rc = wait_for_response(server, midQ[i]);
                if (rc != 0)
                        break;
        }
        if (rc != 0) {
                for (; i < num_rqst; i++) {
                        cifs_server_dbg(FYI, "Cancelling wait for mid %llu cmd: %d\n",
                                 midQ[i]->mid, le16_to_cpu(midQ[i]->command));
                        send_cancel(server, &rqst[i], midQ[i]);
                        spin_lock(&server->mid_lock);
                        midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
                        if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
                                midQ[i]->callback = cifs_cancelled_callback;
                                cancelled_mid[i] = true;
                                credits[i].value = 0;
                        }
                        spin_unlock(&server->mid_lock);
                }
        }

        for (i = 0; i < num_rqst; i++) {
                if (rc < 0)
                        goto out;

                rc = cifs_sync_mid_result(midQ[i], server);
                if (rc != 0) {
                        /* mark this mid as cancelled to not free it below */
                        cancelled_mid[i] = true;
                        goto out;
                }

                if (!midQ[i]->resp_buf ||
                    midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
                        rc = -EIO;
                        cifs_dbg(FYI, "Bad MID state?\n");
                        goto out;
                }

                buf = (char *)midQ[i]->resp_buf;
                resp_iov[i].iov_base = buf;
                resp_iov[i].iov_len = midQ[i]->resp_buf_size +
                        HEADER_PREAMBLE_SIZE(server);

                if (midQ[i]->large_buf)
                        resp_buf_type[i] = CIFS_LARGE_BUFFER;
                else
                        resp_buf_type[i] = CIFS_SMALL_BUFFER;

                rc = server->ops->check_receive(midQ[i], server,
                                                     flags & CIFS_LOG_ERROR);

                /* mark it so buf will not be freed by delete_mid */
                if ((flags & CIFS_NO_RSP_BUF) == 0)
                        midQ[i]->resp_buf = NULL;

        }

        /*
         * Compounding is never used during session establish.
         */
        spin_lock(&ses->ses_lock);
        if ((ses->ses_status == SES_NEW) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
                struct kvec iov = {
                        .iov_base = resp_iov[0].iov_base,
                        .iov_len = resp_iov[0].iov_len
                };
                spin_unlock(&ses->ses_lock);
                cifs_server_lock(server);
                smb311_update_preauth_hash(ses, server, &iov, 1);
                cifs_server_unlock(server);
                spin_lock(&ses->ses_lock);
        }
        spin_unlock(&ses->ses_lock);

out:
        /*
         * This will dequeue all mids. After this it is important that the
         * demultiplex_thread will not process any of these mids any further.
         * This is prevented above by using a noop callback that will not
         * wake this thread except for the very last PDU.
         */
        for (i = 0; i < num_rqst; i++) {
                if (!cancelled_mid[i])
                        delete_mid(midQ[i]);
        }

        return rc;
}

int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
               struct TCP_Server_Info *server,
               struct smb_rqst *rqst, int *resp_buf_type, const int flags,
               struct kvec *resp_iov)
{
        return compound_send_recv(xid, ses, server, flags, 1,
                                  rqst, resp_buf_type, resp_iov);
}

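/*
 * SendReceive2 - entry point that takes a flat iovec array. The first
 * vector is split so that rq_iov[0] holds only the 4-byte RFC1001 length,
 * which is the layout the transport code expects.
 */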
int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
             struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
             const int flags, struct kvec *resp_iov)
{
        struct smb_rqst rqst;
        struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
        int rc;

        if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
                new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
                                        GFP_KERNEL);
                if (!new_iov) {
                        /* otherwise cifs_send_recv below sets resp_buf_type */
                        *resp_buf_type = CIFS_NO_BUFFER;
                        return -ENOMEM;
                }
        } else
                new_iov = s_iov;

        /* 1st iov is a RFC1001 length followed by the rest of the packet */
        memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));

        new_iov[0].iov_base = new_iov[1].iov_base;
        new_iov[0].iov_len = 4;
        new_iov[1].iov_base += 4;
        new_iov[1].iov_len -= 4;

        memset(&rqst, 0, sizeof(struct smb_rqst));
        rqst.rq_iov = new_iov;
        rqst.rq_nvec = n_vec + 1;

        rc = cifs_send_recv(xid, ses, ses->server,
                            &rqst, resp_buf_type, flags, resp_iov);
        if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
                kfree(new_iov);
        return rc;
}

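/*
 * SendReceive - classic synchronous send/receive on preallocated SMB
 * buffers: take one credit, sign and send under the server mutex, wait for
 * the response, copy it into out_buf, and return the credit.
 */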
1336 int
1337 SendReceive(const unsigned int xid, struct cifs_ses *ses,
1338             struct smb_hdr *in_buf, struct smb_hdr *out_buf,
1339             int *pbytes_returned, const int flags)
1340 {
1341         int rc = 0;
1342         struct mid_q_entry *midQ;
1343         unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
1344         struct kvec iov = { .iov_base = in_buf, .iov_len = len };
1345         struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
1346         struct cifs_credits credits = { .value = 1, .instance = 0 };
1347         struct TCP_Server_Info *server;
1348
1349         if (ses == NULL) {
1350                 cifs_dbg(VFS, "Null smb session\n");
1351                 return -EIO;
1352         }
1353         server = ses->server;
1354         if (server == NULL) {
1355                 cifs_dbg(VFS, "Null tcp session\n");
1356                 return -EIO;
1357         }
1358
1359         spin_lock(&server->srv_lock);
1360         if (server->tcpStatus == CifsExiting) {
1361                 spin_unlock(&server->srv_lock);
1362                 return -ENOENT;
1363         }
1364         spin_unlock(&server->srv_lock);
1365
1366         /* Ensure that we do not send more than 50 overlapping requests
1367            to the same server. We may make this configurable later or
1368            use ses->maxReq */
1369
1370         if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
1371                 cifs_server_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
1372                                 len);
1373                 return -EIO;
1374         }
1375
1376         rc = wait_for_free_request(server, flags, &credits.instance);
1377         if (rc)
1378                 return rc;
1379
1380         /* make sure that we sign in the same order that we send on this socket
1381            and avoid races inside tcp sendmsg code that could cause corruption
1382            of smb data */
1383
1384         cifs_server_lock(server);
1385
1386         rc = allocate_mid(ses, in_buf, &midQ);
1387         if (rc) {
1388                 cifs_server_unlock(server);
1389                 /* Update # of requests on wire to server */
1390                 add_credits(server, &credits, 0);
1391                 return rc;
1392         }
1393
1394         rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
1395         if (rc) {
1396                 cifs_server_unlock(server);
1397                 goto out;
1398         }
1399
1400         midQ->mid_state = MID_REQUEST_SUBMITTED;
1401
1402         rc = smb_send(server, in_buf, len);
1403         cifs_save_when_sent(midQ);
1404
	if (rc < 0)
		server->sequence_number -= 2;

	cifs_server_unlock(server);

	if (rc < 0)
		goto out;

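	/*
	 * Sleep until the response arrives. If the wait is interrupted,
	 * ask the server to cancel the request; if the mid is still in
	 * flight, hand it to release_mid so the demultiplex thread can
	 * free it whenever the response eventually shows up.
	 */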
	rc = wait_for_response(server, midQ);
	if (rc != 0) {
		send_cancel(server, &rqst, midQ);
		spin_lock(&server->mid_lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			/* no longer considered to be "in-flight" */
			midQ->callback = release_mid;
			spin_unlock(&server->mid_lock);
			add_credits(server, &credits, 0);
			return rc;
		}
		spin_unlock(&server->mid_lock);
	}

	rc = cifs_sync_mid_result(midQ, server);
	if (rc != 0) {
		add_credits(server, &credits, 0);
		return rc;
	}

	if (!midQ->resp_buf || !out_buf ||
	    midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_server_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, server, 0);
out:
	delete_mid(midQ);
	add_credits(server, &credits, 0);

	return rc;
}

/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
   blocking lock to return. */

static int
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf,
			struct smb_hdr *out_buf)
{
	int bytes_returned;
	struct cifs_ses *ses = tcon->ses;
	LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;

	/* We just modify the current in_buf to change
	   the type of lock from LOCKING_ANDX_SHARED_LOCK
	   or LOCKING_ANDX_EXCLUSIVE_LOCK to
	   LOCKING_ANDX_CANCEL_LOCK. */

	pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
	pSMB->Timeout = 0;
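	/* the cancel goes out as a new request, so it needs a fresh mid */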
	pSMB->hdr.Mid = get_next_mid(ses->server);

	return SendReceive(xid, ses, in_buf, out_buf,
			&bytes_returned, 0);
}

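/*
 * Send a blocking byte-range lock request and wait for the reply. The
 * server may legitimately hold such a request for an arbitrarily long
 * time, so the wait is interruptible and a signal triggers a cancel of
 * the outstanding lock before we give up.
 */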
int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned)
{
	int rc = 0;
	int rstart = 0;
	struct mid_q_entry *midQ;
	struct cifs_ses *ses;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	unsigned int instance;
	struct TCP_Server_Info *server;

	if (tcon == NULL || tcon->ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	ses = tcon->ses;
	server = ses->server;

	if (server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	spin_lock(&server->srv_lock);
	if (server->tcpStatus == CifsExiting) {
		spin_unlock(&server->srv_lock);
		return -ENOENT;
	}
	spin_unlock(&server->srv_lock);

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_tcon_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
			      len);
		return -EIO;
	}

	rc = wait_for_free_request(server, CIFS_BLOCKING_OP, &instance);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	cifs_server_lock(server);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		cifs_server_unlock(server);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
	if (rc) {
		delete_mid(midQ);
		cifs_server_unlock(server);
		return rc;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	rc = smb_send(server, in_buf, len);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		server->sequence_number -= 2;

	cifs_server_unlock(server);

	if (rc < 0) {
		delete_mid(midQ);
		return rc;
	}

	/* Wait for a reply - allow signals to interrupt. */
	rc = wait_event_interruptible(server->response_q,
		(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
		((server->tcpStatus != CifsGood) &&
		 (server->tcpStatus != CifsNew)));

	/* Were we interrupted by a signal ? */
	spin_lock(&server->srv_lock);
	if ((rc == -ERESTARTSYS) &&
		(midQ->mid_state == MID_REQUEST_SUBMITTED) &&
		((server->tcpStatus == CifsGood) ||
		 (server->tcpStatus == CifsNew))) {
		spin_unlock(&server->srv_lock);

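		/*
		 * A signal arrived while the lock request was still
		 * outstanding; cancel it on the server so we do not
		 * stay blocked behind a lock we no longer want.
		 */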
		if (in_buf->Command == SMB_COM_TRANSACTION2) {
			/* POSIX lock. We send a NT_CANCEL SMB to cause the
			   blocking lock to return. */
			rc = send_cancel(server, &rqst, midQ);
			if (rc) {
				delete_mid(midQ);
				return rc;
			}
		} else {
			/* Windows lock. We send a LOCKINGX_CANCEL_LOCK
			   to cause the blocking lock to return. */

			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

			/* If we get -ENOLCK back the lock may have
			   already been removed. Don't exit in this case. */
			if (rc && rc != -ENOLCK) {
				delete_mid(midQ);
				return rc;
			}
		}

		rc = wait_for_response(server, midQ);
		if (rc) {
			send_cancel(server, &rqst, midQ);
			spin_lock(&server->mid_lock);
			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
				/* no longer considered to be "in-flight" */
				midQ->callback = release_mid;
				spin_unlock(&server->mid_lock);
				return rc;
			}
			spin_unlock(&server->mid_lock);
		}

		/* We got the response - restart system call. */
		rstart = 1;
		spin_lock(&server->srv_lock);
	}
	spin_unlock(&server->srv_lock);

	rc = cifs_sync_mid_result(midQ, server);
	if (rc != 0)
		return rc;

	/* rcvd frame is ok */
	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_tcon_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, server, 0);
out:
	delete_mid(midQ);
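	/*
	 * If the lock was cancelled after a signal and the server then
	 * refused it, restart the system call so the request is retried
	 * from scratch.
	 */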
	if (rstart && rc == -EACCES)
		return -ERESTARTSYS;
	return rc;
}

/*
 * Discard any remaining data in the current SMB. To do this, we borrow the
 * current bigbuf.
 */
int
cifs_discard_remaining_data(struct TCP_Server_Info *server)
{
	unsigned int rfclen = server->pdu_size;
	int remaining = rfclen + HEADER_PREAMBLE_SIZE(server) -
		server->total_read;

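	/* pull the leftover bytes off the socket in buffer-sized chunks */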
	while (remaining > 0) {
		int length;

		length = cifs_discard_from_socket(server,
				min_t(size_t, remaining,
				      CIFSMaxBufSize + MAX_HEADER_SIZE(server)));
		if (length < 0)
			return length;
		server->total_read += length;
		remaining -= length;
	}

	return 0;
}

static int
__cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid,
		     bool malformed)
{
	int length;

	length = cifs_discard_remaining_data(server);
	dequeue_mid(mid, malformed);
	mid->resp_buf = server->smallbuf;
	server->smallbuf = NULL;
	return length;
}

static int
cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
{
	struct cifs_readdata *rdata = mid->callback_data;

	return __cifs_readv_discard(server, mid, rdata->result);
}

int
cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
{
	int length, len;
	unsigned int data_offset, data_len;
	struct cifs_readdata *rdata = mid->callback_data;
	char *buf = server->smallbuf;
	unsigned int buflen = server->pdu_size + HEADER_PREAMBLE_SIZE(server);
	bool use_rdma_mr = false;

	cifs_dbg(FYI, "%s: mid=%llu offset=%llu bytes=%u\n",
		 __func__, mid->mid, rdata->offset, rdata->bytes);

	/*
	 * read the rest of READ_RSP header (sans Data array), or whatever we
	 * can if there's not enough data. At this point, we've read down to
	 * the Mid.
	 */
	len = min_t(unsigned int, buflen, server->vals->read_rsp_size) -
							HEADER_SIZE(server) + 1;

	length = cifs_read_from_socket(server,
				       buf + HEADER_SIZE(server) - 1, len);
	if (length < 0)
		return length;
	server->total_read += length;

	if (server->ops->is_session_expired &&
	    server->ops->is_session_expired(buf)) {
		cifs_reconnect(server, true);
		return -1;
	}

	if (server->ops->is_status_pending &&
	    server->ops->is_status_pending(buf, server)) {
		cifs_discard_remaining_data(server);
		return -1;
	}

	/* set up first two iov for signature check and to get credits */
	rdata->iov[0].iov_base = buf;
	rdata->iov[0].iov_len = HEADER_PREAMBLE_SIZE(server);
	rdata->iov[1].iov_base = buf + HEADER_PREAMBLE_SIZE(server);
	rdata->iov[1].iov_len =
		server->total_read - HEADER_PREAMBLE_SIZE(server);
	cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
		 rdata->iov[0].iov_base, rdata->iov[0].iov_len);
	cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n",
		 rdata->iov[1].iov_base, rdata->iov[1].iov_len);

	/* Was the SMB read successful? */
	rdata->result = server->ops->map_error(buf, false);
	if (rdata->result != 0) {
		cifs_dbg(FYI, "%s: server returned error %d\n",
			 __func__, rdata->result);
		/* normal error on read response */
		return __cifs_readv_discard(server, mid, false);
	}

	/* Is there enough to get to the rest of the READ_RSP header? */
	if (server->total_read < server->vals->read_rsp_size) {
		cifs_dbg(FYI, "%s: server returned short header. got=%u expected=%zu\n",
			 __func__, server->total_read,
			 server->vals->read_rsp_size);
		rdata->result = -EIO;
		return cifs_readv_discard(server, mid);
	}

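	/*
	 * Sanity-check where the payload claims to start: it must lie at
	 * or beyond what we have already read, and within the small buffer.
	 */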
	data_offset = server->ops->read_data_offset(buf) +
		HEADER_PREAMBLE_SIZE(server);
	if (data_offset < server->total_read) {
		/*
		 * win2k8 sometimes sends an offset of 0 when the read
		 * is beyond the EOF. Treat it as if the data starts just after
		 * the header.
		 */
		cifs_dbg(FYI, "%s: data offset (%u) inside read response header\n",
			 __func__, data_offset);
		data_offset = server->total_read;
	} else if (data_offset > MAX_CIFS_SMALL_BUFFER_SIZE) {
		/* data_offset is beyond the end of smallbuf */
		cifs_dbg(FYI, "%s: data offset (%u) beyond end of smallbuf\n",
			 __func__, data_offset);
		rdata->result = -EIO;
		return cifs_readv_discard(server, mid);
	}

	cifs_dbg(FYI, "%s: total_read=%u data_offset=%u\n",
		 __func__, server->total_read, data_offset);

	len = data_offset - server->total_read;
	if (len > 0) {
		/* read any junk before data into the rest of smallbuf */
		length = cifs_read_from_socket(server,
					       buf + server->total_read, len);
		if (length < 0)
			return length;
		server->total_read += length;
	}

	/* how much data is in the response? */
#ifdef CONFIG_CIFS_SMB_DIRECT
	use_rdma_mr = rdata->mr;
#endif
	data_len = server->ops->read_data_length(buf, use_rdma_mr);
	if (!use_rdma_mr && (data_offset + data_len > buflen)) {
		/* data_len is corrupt -- discard frame */
		rdata->result = -EIO;
		return cifs_readv_discard(server, mid);
	}

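	/* read the file data itself into the pages supplied with the request */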
	length = rdata->read_into_pages(server, rdata, data_len);
	if (length < 0)
		return length;

	server->total_read += length;

	cifs_dbg(FYI, "total_read=%u buflen=%u remaining=%u\n",
		 server->total_read, buflen, data_len);

	/* discard anything left over */
	if (server->total_read < buflen)
		return cifs_readv_discard(server, mid);

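	/* entire frame consumed: mark the mid answered and pass it the header buffer */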
	dequeue_mid(mid, false);
	mid->resp_buf = server->smallbuf;
	server->smallbuf = NULL;
	return length;
}