/*
 *   Copyright (C) International Business Machines  Corp., 2002,2008
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *   Jeremy Allison (jra@samba.org) 2006.
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/wait.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/tcp.h>
#include <linux/highmem.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <linux/mempool.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
void
cifs_wake_up_task(struct mid_q_entry *mid)
{
	wake_up_process(mid->callback_data);
}
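
/*
 * Allocate and initialize a mid from the mid mempool for the given request
 * header. New mids default to synchronous use: the callback simply wakes
 * the task that allocated them.
 */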
struct mid_q_entry *
AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
	struct mid_q_entry *temp;

	if (server == NULL) {
		cERROR(1, "Null TCP session in AllocMidQEntry");
		return NULL;
	}

	temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
	if (temp == NULL)
		return temp;

	memset(temp, 0, sizeof(struct mid_q_entry));
	temp->mid = smb_buffer->Mid;	/* always LE */
	temp->pid = current->pid;
	temp->command = cpu_to_le16(smb_buffer->Command);
	cFYI(1, "For smb_command %d", smb_buffer->Command);
	/* do_gettimeofday(&temp->when_sent); */ /* easier to use jiffies */
	/* when mid allocated can be before when sent */
	temp->when_alloc = jiffies;
	temp->server = server;

	/*
	 * The default is for the mid to be synchronous, so the
	 * default callback just wakes up the current task.
	 */
	temp->callback = cifs_wake_up_task;
	temp->callback_data = current;

	atomic_inc(&midCount);
	temp->mid_state = MID_REQUEST_ALLOCATED;
	return temp;
}
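
/*
 * Free a mid: release its response buffer back to the right pool and return
 * the mid to the mempool. With CONFIG_CIFS_STATS2, responses that took more
 * than a second to arrive are logged as a hint of a slow link or server.
 */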
void
DeleteMidQEntry(struct mid_q_entry *midEntry)
{
#ifdef CONFIG_CIFS_STATS2
	__le16 command = midEntry->server->vals->lock_cmd;
	unsigned long now;
#endif
	midEntry->mid_state = MID_FREE;
	atomic_dec(&midCount);
	if (midEntry->large_buf)
		cifs_buf_release(midEntry->resp_buf);
	else
		cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
	now = jiffies;
	/* commands taking longer than one second are indications that
	   something is wrong, unless it is quite a slow link or server */
	if ((now - midEntry->when_alloc) > HZ) {
		if ((cifsFYI & CIFS_TIMER) && (midEntry->command != command)) {
			printk(KERN_DEBUG " CIFS slow rsp: cmd %d mid %llu",
			       midEntry->command, midEntry->mid);
			printk(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
			       now - midEntry->when_alloc,
			       now - midEntry->when_sent,
			       now - midEntry->when_received);
		}
	}
#endif
	mempool_free(midEntry, cifs_mid_poolp);
}
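
/*
 * Unlink a mid from the pending queue under GlobalMid_Lock, then free it.
 */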
void
cifs_delete_mid(struct mid_q_entry *mid)
{
	spin_lock(&GlobalMid_Lock);
	list_del(&mid->qhead);
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
}

/**
 * smb_send_kvec - send an array of kvecs to the server
 * @server:	Server to send the data to
 * @iov:	Pointer to array of kvecs
 * @n_vec:	length of kvec array
 * @sent:	amount of data sent on socket is stored here
 *
 * Our basic "send data to server" function. Should be called with srv_mutex
 * held. The caller is responsible for handling the results.
 */
static int
smb_send_kvec(struct TCP_Server_Info *server, struct kvec *iov, size_t n_vec,
		size_t *sent)
{
	int rc = 0;
	int i = 0;
	struct msghdr smb_msg;
	unsigned int remaining;
	size_t first_vec = 0;
	struct socket *ssocket = server->ssocket;

	*sent = 0;

	smb_msg.msg_name = (struct sockaddr *) &server->dstaddr;
	smb_msg.msg_namelen = sizeof(struct sockaddr);
	smb_msg.msg_control = NULL;
	smb_msg.msg_controllen = 0;
	if (server->noblocksnd)
		smb_msg.msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
	else
		smb_msg.msg_flags = MSG_NOSIGNAL;

	remaining = 0;
	for (i = 0; i < n_vec; i++)
		remaining += iov[i].iov_len;

	i = 0;
	while (remaining) {
		/*
		 * If blocking send, we try 3 times, since each can block
		 * for 5 seconds. For nonblocking we have to try more
		 * but wait increasing amounts of time allowing time for
		 * socket to clear. The overall time we wait in either
		 * case to send on the socket is about 15 seconds.
		 * Similarly we wait for 15 seconds for a response from
		 * the server in SendReceive[2] for the server to send
		 * a response back for most types of requests (except
		 * SMB Write past end of file which can be slow, and
		 * blocking lock operations). NFS waits slightly longer
		 * than CIFS, but this can make it take longer for
		 * nonresponsive servers to be detected and 15 seconds
		 * is more than enough time for modern networks to
		 * send a packet. In most cases if we fail to send
		 * after the retries we will kill the socket and
		 * reconnect which may clear the network problem.
		 */
		rc = kernel_sendmsg(ssocket, &smb_msg, &iov[first_vec],
				    n_vec - first_vec, remaining);
		if (rc == -ENOSPC || rc == -EAGAIN) {
			/*
			 * Catch if a low level driver returns -ENOSPC. This
			 * WARN_ON will be removed by 3.10 if no one reports
			 * seeing ENOSPC.
			 */
			WARN_ON_ONCE(rc == -ENOSPC);
			i++;
			if (i >= 14 || (!server->noblocksnd && (i > 2))) {
				cERROR(1, "sends on sock %p stuck for 15 "
					  "seconds", ssocket);
				rc = -EAGAIN;
				break;
			}
			msleep(1 << i);
			continue;
		}
		if (rc < 0)
			break;

		/* send was at least partially successful */
		*sent += rc;

		if (rc == remaining) {
			remaining = 0;
			break;
		}
		if (rc > remaining) {
			cERROR(1, "sent %d requested %d", rc, remaining);
			break;
		}
		if (rc == 0) {
			/* should never happen, letting socket clear before
			   retrying is our only obvious option here */
			cERROR(1, "tcp sent no data");
			msleep(500);
			continue;
		}

		remaining -= rc;

		/* the line below resets i */
		for (i = first_vec; i < n_vec; i++) {
			if (iov[i].iov_len) {
				if (rc > iov[i].iov_len) {
					rc -= iov[i].iov_len;
					iov[i].iov_len = 0;
				} else {
					iov[i].iov_base += rc;
					iov[i].iov_len -= rc;
					first_vec = i;
					break;
				}
			}
		}

		i = 0; /* in case we get ENOSPC on the next send */
	}
	return rc;
}

/**
 * cifs_rqst_page_to_kvec - Turn a slot in the smb_rqst page array into a kvec
 * @rqst: pointer to smb_rqst
 * @idx: index into the array of the page
 * @iov: pointer to struct kvec that will hold the result
 *
 * Helper function to convert a slot in the rqst->rq_pages array into a kvec.
 * The page will be kmapped and the address placed into iov_base. The length
 * is set to rq_pagesz, or to rq_tailsz if this is the last page.
 */
static void
cifs_rqst_page_to_kvec(struct smb_rqst *rqst, unsigned int idx,
			struct kvec *iov)
{
	/*
	 * FIXME: We could avoid this kmap altogether if we used
	 * kernel_sendpage instead of kernel_sendmsg. That will only
	 * work if signing is disabled though as sendpage inlines the
	 * page directly into the fraglist. If userspace modifies the
	 * page after we calculate the signature, then the server will
	 * reject it and may break the connection. kernel_sendmsg does
	 * an extra copy of the data and avoids that issue.
	 */
	iov->iov_base = kmap(rqst->rq_pages[idx]);

	/* if last page, don't send beyond this offset into page */
	if (idx == (rqst->rq_npages - 1))
		iov->iov_len = rqst->rq_tailsz;
	else
		iov->iov_len = rqst->rq_pagesz;
}
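
/*
 * Send a full smb_rqst: cork the socket, push the kvec array and then each
 * page in the request, then uncork. If only part of the frame made it onto
 * the wire, flag the session for reconnect so the server discards the
 * partial SMB.
 */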
static int
smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	int rc;
	struct kvec *iov = rqst->rq_iov;
	int n_vec = rqst->rq_nvec;
	unsigned int smb_buf_length = get_rfc1002_length(iov[0].iov_base);
	unsigned int i;
	size_t total_len = 0, sent;
	struct socket *ssocket = server->ssocket;
	int val = 1;

	cFYI(1, "Sending smb: smb_len=%u", smb_buf_length);
	dump_smb(iov[0].iov_base, iov[0].iov_len);

	/* cork the socket */
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
				(char *)&val, sizeof(val));

	rc = smb_send_kvec(server, iov, n_vec, &sent);
	if (rc < 0)
		goto uncork;
	total_len += sent;

	/* now walk the page array and send each page in it */
	for (i = 0; i < rqst->rq_npages; i++) {
		struct kvec p_iov;

		cifs_rqst_page_to_kvec(rqst, i, &p_iov);
		rc = smb_send_kvec(server, &p_iov, 1, &sent);
		kunmap(rqst->rq_pages[i]);
		if (rc < 0)
			break;
		total_len += sent;
	}

uncork:
	/* uncork it */
	val = 0;
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
				(char *)&val, sizeof(val));

	if ((total_len > 0) && (total_len != smb_buf_length + 4)) {
		cFYI(1, "partial send (wanted=%u sent=%zu): terminating "
		     "session", smb_buf_length + 4, total_len);
		/*
		 * If we have only sent part of an SMB then the next SMB could
		 * be taken as the remainder of this one. We need to kill the
		 * socket so the server throws away the partial SMB
		 */
		server->tcpStatus = CifsNeedReconnect;
	}

	if (rc < 0 && rc != -EINTR)
		cERROR(1, "Error %d sending data on socket to server", rc);
	else
		rc = 0;

	return rc;
}

static int
smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
{
	struct smb_rqst rqst = { .rq_iov = iov,
				 .rq_nvec = n_vec };

	return smb_send_rqst(server, &rqst);
}

int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
	 unsigned int smb_buf_length)
{
	struct kvec iov;

	iov.iov_base = smb_buffer;
	iov.iov_len = smb_buf_length + 4;

	return smb_sendv(server, &iov, 1);
}
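
/*
 * Wait until a slot is available for another request on this socket. Async
 * operations (e.g. oplock break responses) are never held up, and blocking
 * lock requests do not consume a slot since they may legitimately block on
 * the server for a long time.
 */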
static int
wait_for_free_credits(struct TCP_Server_Info *server, const int timeout,
		      int *credits)
{
	int rc;

	spin_lock(&server->req_lock);
	if (timeout == CIFS_ASYNC_OP) {
		/* oplock breaks must not be held up */
		server->in_flight++;
		*credits -= 1;
		spin_unlock(&server->req_lock);
		return 0;
	}

	while (1) {
		if (*credits <= 0) {
			spin_unlock(&server->req_lock);
			cifs_num_waiters_inc(server);
			rc = wait_event_killable(server->request_q,
						 has_credits(server, credits));
			cifs_num_waiters_dec(server);
			if (rc)
				return rc;
			spin_lock(&server->req_lock);
		} else {
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&server->req_lock);
				return -ENOENT;
			}
			/*
			 * Can not count locking commands against total
			 * as they are allowed to block on server.
			 */
			/* update # of requests on the wire to server */
			if (timeout != CIFS_BLOCKING_OP) {
				*credits -= 1;
				server->in_flight++;
			}
			spin_unlock(&server->req_lock);
			break;
		}
	}
	return 0;
}

static int
wait_for_free_request(struct TCP_Server_Info *server, const int timeout,
		      const int optype)
{
	return wait_for_free_credits(server, timeout,
				server->ops->get_credits_field(server, optype));
}
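
/*
 * Allocate a mid for a synchronous request and queue it on the server's
 * pending_mid_q. Fails if the session is exiting, needs reconnect, or is
 * still being set up (except for the SESSION_SETUP and NEGOTIATE commands
 * that do the setting up).
 */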
static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
			struct mid_q_entry **ppmidQ)
{
	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	if (ses->server->tcpStatus == CifsNeedReconnect) {
		cFYI(1, "tcp session dead - return to caller to retry");
		return -EAGAIN;
	}

	if (ses->status != CifsGood) {
		/* check if SMB session is bad because we are setting it up */
		if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
		    (in_buf->Command != SMB_COM_NEGOTIATE))
			return -EAGAIN;
		/* else ok - we are setting up session */
	}

	*ppmidQ = AllocMidQEntry(in_buf, ses->server);
	if (*ppmidQ == NULL)
		return -ENOMEM;
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);
	return 0;
}
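
/*
 * Sleep (freezably, but killable) until the demultiplex thread moves the
 * mid out of the MID_REQUEST_SUBMITTED state.
 */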
static int
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
	int error;

	error = wait_event_freezekillable(server->response_q,
					  midQ->mid_state != MID_REQUEST_SUBMITTED);
	if (error < 0)
		return -ERESTARTSYS;
	return 0;
}
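
/*
 * Prepare an async request: flag the header for signing if the server
 * requires it, allocate a mid and sign the request. Returns the mid or an
 * ERR_PTR on failure.
 */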
struct mid_q_entry *
cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	/* enable signing if server requires it */
	if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
		hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

	mid = AllocMidQEntry(hdr, server);
	if (mid == NULL)
		return ERR_PTR(-ENOMEM);

	rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
	if (rc) {
		DeleteMidQEntry(mid);
		return ERR_PTR(rc);
	}
	return mid;
}

/*
 * Send a SMB request and set the callback function in the mid to handle
 * the result. Caller is responsible for dealing with timeouts.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
		mid_receive_t *receive, mid_callback_t *callback,
		void *cbdata, const int flags)
{
	int rc, timeout, optype;
	struct mid_q_entry *mid;

	timeout = flags & CIFS_TIMEOUT_MASK;
	optype = flags & CIFS_OP_MASK;

	rc = wait_for_free_request(server, timeout, optype);
	if (rc)
		return rc;

	mutex_lock(&server->srv_mutex);
	mid = server->ops->setup_async_request(server, rqst);
	if (IS_ERR(mid)) {
		mutex_unlock(&server->srv_mutex);
		add_credits(server, 1, optype);
		wake_up(&server->request_q);
		return PTR_ERR(mid);
	}

	mid->receive = receive;
	mid->callback = callback;
	mid->callback_data = cbdata;
	mid->mid_state = MID_REQUEST_SUBMITTED;

	/* put it on the pending_mid_q */
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&mid->qhead, &server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);

	cifs_in_send_inc(server);
	rc = smb_send_rqst(server, rqst);
	cifs_in_send_dec(server);
	cifs_save_when_sent(mid);
	mutex_unlock(&server->srv_mutex);

	if (rc == 0)
		return 0;

	cifs_delete_mid(mid);
	add_credits(server, 1, optype);
	wake_up(&server->request_q);
	return rc;
}

/*
 * Send an SMB Request. No response info (other than return code)
 * needs to be parsed.
 *
 * flags indicate the type of request buffer and how long to wait
 * and whether to log NT STATUS code (error) before mapping it to POSIX error
 */
int
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
		 char *in_buf, int flags)
{
	int rc;
	struct kvec iov[1];
	int resp_buf_type;

	iov[0].iov_base = in_buf;
	iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
	flags |= CIFS_NO_RESP;
	rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags);
	cFYI(DBG2, "SendRcvNoRsp flags %d rc %d", flags, rc);

	return rc;
}
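
/*
 * Translate the final state of a synchronous mid into an errno. A received
 * response returns 0 and leaves the mid for the caller to process; any
 * other outcome frees the mid here.
 */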
static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
	int rc = 0;

	cFYI(1, "%s: cmd=%d mid=%llu state=%d", __func__,
	     le16_to_cpu(mid->command), mid->mid, mid->mid_state);

	spin_lock(&GlobalMid_Lock);
	switch (mid->mid_state) {
	case MID_RESPONSE_RECEIVED:
		spin_unlock(&GlobalMid_Lock);
		return rc;
	case MID_RETRY_NEEDED:
		rc = -EAGAIN;
		break;
	case MID_RESPONSE_MALFORMED:
		rc = -EIO;
		break;
	case MID_SHUTDOWN:
		rc = -EHOSTDOWN;
		break;
	default:
		list_del_init(&mid->qhead);
		cERROR(1, "%s: invalid mid state mid=%llu state=%d", __func__,
		       mid->mid, mid->mid_state);
		rc = -EIO;
	}
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
	return rc;
}

static int
send_cancel(struct TCP_Server_Info *server, void *buf, struct mid_q_entry *mid)
{
	return server->ops->send_cancel ?
				server->ops->send_cancel(server, buf, mid) : 0;
}
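
/*
 * Sanity check a received response: dump the start of the frame, verify the
 * SMB signature when signing is in use, and map the server's status code to
 * a POSIX error.
 */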
int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
		   bool log_error)
{
	unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;

	dump_smb(mid->resp_buf, min_t(u32, 92, len));

	/* convert the length into a more usable form */
	if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
		struct kvec iov;
		int rc = 0;
		struct smb_rqst rqst = { .rq_iov = &iov,
					 .rq_nvec = 1 };

		iov.iov_base = mid->resp_buf;
		iov.iov_len = len;
		/* FIXME: add code to kill session */
		rc = cifs_verify_signature(&rqst, server,
					   mid->sequence_number + 1);
		if (rc)
			cERROR(1, "SMB signature verification returned error = "
			       "%d", rc);
	}

	/* BB special case reconnect tid and uid here? */
	return map_smb_to_linux_error(mid->resp_buf, log_error);
}
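
/*
 * Synchronous counterpart of cifs_setup_async_request: allocate and queue
 * a mid for the request and sign it, cleaning the mid up again if signing
 * fails.
 */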
struct mid_q_entry *
cifs_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	rc = allocate_mid(ses, hdr, &mid);
	if (rc)
		return ERR_PTR(rc);
	rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
	if (rc) {
		cifs_delete_mid(mid);
		return ERR_PTR(rc);
	}
	return mid;
}
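
/*
 * Send a request described by an iovec array and wait for the response.
 * On success the response buffer is returned to the caller through iov[0]
 * and *resp_buf_type records which buffer pool it must be released to.
 */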
int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
	     struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
	     const int flags)
{
	int rc = 0;
	int timeout, optype;
	struct mid_q_entry *midQ;
	char *buf = iov[0].iov_base;
	unsigned int credits = 1;
	struct smb_rqst rqst = { .rq_iov = iov,
				 .rq_nvec = n_vec };

	timeout = flags & CIFS_TIMEOUT_MASK;
	optype = flags & CIFS_OP_MASK;

	*resp_buf_type = CIFS_NO_BUFFER;  /* no response buf yet */

	if ((ses == NULL) || (ses->server == NULL)) {
		cifs_small_buf_release(buf);
		cERROR(1, "Null session");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting) {
		cifs_small_buf_release(buf);
		return -ENOENT;
	}

	/*
	 * Ensure that we do not send more than 50 overlapping requests
	 * to the same server. We may make this configurable later or
	 * use ses->maxReq.
	 */
	rc = wait_for_free_request(ses->server, timeout, optype);
	if (rc) {
		cifs_small_buf_release(buf);
		return rc;
	}

	/*
	 * Make sure that we sign in the same order that we send on this socket
	 * and avoid races inside tcp sendmsg code that could cause corruption
	 * of smb data.
	 */
	mutex_lock(&ses->server->srv_mutex);

	midQ = ses->server->ops->setup_request(ses, &rqst);
	if (IS_ERR(midQ)) {
		mutex_unlock(&ses->server->srv_mutex);
		cifs_small_buf_release(buf);
		/* Update # of requests on wire to server */
		add_credits(ses->server, 1, optype);
		return PTR_ERR(midQ);
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(ses->server);
	rc = smb_sendv(ses->server, iov, n_vec);
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);

	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0) {
		cifs_small_buf_release(buf);
		goto out;
	}

	if (timeout == CIFS_ASYNC_OP) {
		cifs_small_buf_release(buf);
		goto out;
	}

	rc = wait_for_response(ses->server, midQ);
	if (rc != 0) {
		send_cancel(ses->server, buf, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			cifs_small_buf_release(buf);
			add_credits(ses->server, 1, optype);
			return rc;
		}
		spin_unlock(&GlobalMid_Lock);
	}

	cifs_small_buf_release(buf);

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0) {
		add_credits(ses->server, 1, optype);
		return rc;
	}

	if (!midQ->resp_buf || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cFYI(1, "Bad MID state?");
		goto out;
	}

	buf = (char *)midQ->resp_buf;
	iov[0].iov_base = buf;
	iov[0].iov_len = get_rfc1002_length(buf) + 4;
	if (midQ->large_buf)
		*resp_buf_type = CIFS_LARGE_BUFFER;
	else
		*resp_buf_type = CIFS_SMALL_BUFFER;

	credits = ses->server->ops->get_credits(midQ);

	rc = ses->server->ops->check_receive(midQ, ses->server,
					     flags & CIFS_LOG_ERROR);

	/* mark it so buf will not be freed by cifs_delete_mid */
	if ((flags & CIFS_NO_RESP) == 0)
		midQ->resp_buf = NULL;
out:
	cifs_delete_mid(midQ);
	add_credits(ses->server, credits, optype);

	return rc;
}
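
/*
 * Synchronous send/receive for a single pre-built SMB buffer: the response
 * is copied into out_buf and its length returned through pbytes_returned.
 */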
int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned, const int timeout)
{
	int rc = 0;
	struct mid_q_entry *midQ;

	if (ses == NULL) {
		cERROR(1, "Null smb session");
		return -EIO;
	}
	if (ses->server == NULL) {
		cERROR(1, "Null tcp session");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (be32_to_cpu(in_buf->smb_buf_length) > CIFSMaxBufSize +
			MAX_CIFS_HDR_SIZE - 4) {
		cERROR(1, "Illegal length, greater than maximum frame, %d",
			   be32_to_cpu(in_buf->smb_buf_length));
		return -EIO;
	}

	rc = wait_for_free_request(ses->server, timeout, 0);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */
	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		/* Update # of requests on wire to server */
		add_credits(ses->server, 1, 0);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		goto out;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;

	cifs_in_send_inc(ses->server);
	rc = smb_send(ses->server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);
	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0)
		goto out;

	if (timeout == CIFS_ASYNC_OP)
		goto out;

	rc = wait_for_response(ses->server, midQ);
	if (rc != 0) {
		send_cancel(ses->server, in_buf, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			/* no longer considered to be "in-flight" */
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			add_credits(ses->server, 1, 0);
			return rc;
		}
		spin_unlock(&GlobalMid_Lock);
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0) {
		add_credits(ses->server, 1, 0);
		return rc;
	}

	if (!midQ->resp_buf || !out_buf ||
	    midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cERROR(1, "Bad MID state?");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, ses->server, 0);
out:
	cifs_delete_mid(midQ);
	add_credits(ses->server, 1, 0);

	return rc;
}

/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
   blocking lock to return. */
static int
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf,
			struct smb_hdr *out_buf)
{
	int bytes_returned;
	struct cifs_ses *ses = tcon->ses;
	LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;

	/* We just modify the current in_buf to change
	   the type of lock from LOCKING_ANDX_SHARED_LOCK
	   or LOCKING_ANDX_EXCLUSIVE_LOCK to
	   LOCKING_ANDX_CANCEL_LOCK. */

	pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
	pSMB->Timeout = 0;
	pSMB->hdr.Mid = get_next_mid(ses->server);

	return SendReceive(xid, ses, in_buf, out_buf,
			&bytes_returned, 0);
}
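
/*
 * Variant of SendReceive for blocking lock requests: waits interruptibly
 * and, if a signal arrives while the lock is still outstanding, sends a
 * cancel (NT_CANCEL for POSIX locks, LOCKINGX_CANCEL_LOCK for Windows
 * locks) before collecting the result.
 */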
int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf, struct smb_hdr *out_buf,
			int *pbytes_returned)
{
	int rc = 0;
	int rstart = 0;
	struct mid_q_entry *midQ;
	struct cifs_ses *ses;

	if (tcon == NULL || tcon->ses == NULL) {
		cERROR(1, "Null smb session");
		return -EIO;
	}
	ses = tcon->ses;

	if (ses->server == NULL) {
		cERROR(1, "Null tcp session");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (be32_to_cpu(in_buf->smb_buf_length) > CIFSMaxBufSize +
			MAX_CIFS_HDR_SIZE - 4) {
		cERROR(1, "Illegal length, greater than maximum frame, %d",
			   be32_to_cpu(in_buf->smb_buf_length));
		return -EIO;
	}

	rc = wait_for_free_request(ses->server, CIFS_BLOCKING_OP, 0);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */
	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
	if (rc) {
		cifs_delete_mid(midQ);
		mutex_unlock(&ses->server->srv_mutex);
		return rc;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(ses->server);
	rc = smb_send(ses->server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);
	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0) {
		cifs_delete_mid(midQ);
		return rc;
	}

	/* Wait for a reply - allow signals to interrupt. */
	rc = wait_event_interruptible(ses->server->response_q,
		(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
		((ses->server->tcpStatus != CifsGood) &&
		 (ses->server->tcpStatus != CifsNew)));

	/* Were we interrupted by a signal ? */
	if ((rc == -ERESTARTSYS) &&
		(midQ->mid_state == MID_REQUEST_SUBMITTED) &&
		((ses->server->tcpStatus == CifsGood) ||
		 (ses->server->tcpStatus == CifsNew))) {

		if (in_buf->Command == SMB_COM_TRANSACTION2) {
			/* POSIX lock. We send a NT_CANCEL SMB to cause the
			   blocking lock to return. */
			rc = send_cancel(ses->server, in_buf, midQ);
			if (rc) {
				cifs_delete_mid(midQ);
				return rc;
			}
		} else {
			/* Windows lock. We send a LOCKINGX_CANCEL_LOCK
			   to cause the blocking lock to return. */
			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

			/* If we get -ENOLCK back the lock may have
			   already been removed. Don't exit in this case. */
			if (rc && rc != -ENOLCK) {
				cifs_delete_mid(midQ);
				return rc;
			}
		}

		rc = wait_for_response(ses->server, midQ);
		if (rc) {
			send_cancel(ses->server, in_buf, midQ);
			spin_lock(&GlobalMid_Lock);
			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
				/* no longer considered to be "in-flight" */
				midQ->callback = DeleteMidQEntry;
				spin_unlock(&GlobalMid_Lock);
				return rc;
			}
			spin_unlock(&GlobalMid_Lock);
		}

		/* We got the response - restart system call. */
		rstart = 1;
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0)
		return rc;

	/* rcvd frame is ok */
	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cERROR(1, "Bad MID state?");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, ses->server, 0);
out:
	cifs_delete_mid(midQ);
	if (rstart && rc == -EACCES)
		return -ERESTARTSYS;
	return rc;
}