
cifs: prevent starvation in wait_for_free_credits for multi-credit requests
/*
 *   fs/cifs/transport.c
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2008
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *   Jeremy Allison (jra@samba.org) 2006.
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/wait.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/tcp.h>
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>
#include <asm/processor.h>
#include <linux/mempool.h>
#include <linux/signal.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smb2proto.h"
#include "smbdirect.h"

/* Max number of iovectors we can use off the stack when sending requests. */
#define CIFS_MAX_IOV_SIZE 8

void
cifs_wake_up_task(struct mid_q_entry *mid)
{
        wake_up_process(mid->callback_data);
}

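/*
 * AllocMidQEntry - allocate and initialize a mid (multiplex id) entry
 * @smb_buffer: request buffer the mid is allocated for
 * @server:     TCP server instance the request will be sent to
 *
 * Returns a zeroed mid with one reference held, its mid/pid/command fields
 * filled in from the request, and the default synchronous callback
 * (cifs_wake_up_task) installed. Returns NULL if @server is NULL.
 */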
struct mid_q_entry *
AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
        struct mid_q_entry *temp;

        if (server == NULL) {
                cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
                return NULL;
        }

        temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
        memset(temp, 0, sizeof(struct mid_q_entry));
        kref_init(&temp->refcount);
        temp->mid = get_mid(smb_buffer);
        temp->pid = current->pid;
        temp->command = cpu_to_le16(smb_buffer->Command);
        cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
        /*      do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
        /* when mid allocated can be before when sent */
        temp->when_alloc = jiffies;
        temp->server = server;

        /*
         * The default is for the mid to be synchronous, so the
         * default callback just wakes up the current task.
         */
        temp->callback = cifs_wake_up_task;
        temp->callback_data = current;

        atomic_inc(&midCount);
        temp->mid_state = MID_REQUEST_ALLOCATED;
        return temp;
}

static void _cifs_mid_q_entry_release(struct kref *refcount)
{
        struct mid_q_entry *mid = container_of(refcount, struct mid_q_entry,
                                               refcount);

        mempool_free(mid, cifs_mid_poolp);
}

void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
{
        spin_lock(&GlobalMid_Lock);
        kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
        spin_unlock(&GlobalMid_Lock);
}

void
DeleteMidQEntry(struct mid_q_entry *midEntry)
{
#ifdef CONFIG_CIFS_STATS2
        __le16 command = midEntry->server->vals->lock_cmd;
        unsigned long now;
#endif
        midEntry->mid_state = MID_FREE;
        atomic_dec(&midCount);
        if (midEntry->large_buf)
                cifs_buf_release(midEntry->resp_buf);
        else
                cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
        now = jiffies;
        /*
         * Commands taking longer than one second (default) can be an
         * indication that something is wrong, unless it is quite a slow link
         * or a very busy server. Note that this calc is unlikely to wrap as
         * long as slow_rsp_threshold is not set way above the recommended
         * max value (32767, i.e. 9 hours), and is generally harmless even if
         * wrong since it only affects debug counters - so leave the calc as
         * a simple comparison rather than doing multiple conversions and
         * overflow checks.
         */
        if ((slow_rsp_threshold != 0) &&
            time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
            (midEntry->command != command)) {
                /*
                 * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
                 * NB: le16_to_cpu returns unsigned so can not be negative below
                 */
                if (le16_to_cpu(midEntry->command) < NUMBER_OF_SMB2_COMMANDS)
                        cifs_stats_inc(&midEntry->server->smb2slowcmd[le16_to_cpu(midEntry->command)]);

                trace_smb3_slow_rsp(le16_to_cpu(midEntry->command),
                               midEntry->mid, midEntry->pid,
                               midEntry->when_sent, midEntry->when_received);
                if (cifsFYI & CIFS_TIMER) {
                        pr_debug(" CIFS slow rsp: cmd %d mid %llu",
                               midEntry->command, midEntry->mid);
                        cifs_info(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
                               now - midEntry->when_alloc,
                               now - midEntry->when_sent,
                               now - midEntry->when_received);
                }
        }
#endif
        cifs_mid_q_entry_release(midEntry);
}

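/*
 * cifs_delete_mid - unlink a mid from the pending queue and free it
 * @mid: mid entry to remove
 *
 * Removes the entry from pending_mid_q under GlobalMid_Lock, marks it
 * MID_DELETED and drops it via DeleteMidQEntry.
 */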
void
cifs_delete_mid(struct mid_q_entry *mid)
{
        spin_lock(&GlobalMid_Lock);
        list_del_init(&mid->qhead);
        mid->mid_flags |= MID_DELETED;
        spin_unlock(&GlobalMid_Lock);

        DeleteMidQEntry(mid);
}

/*
 * smb_send_kvec - send an array of kvecs to the server
 * @server:     Server to send the data to
 * @smb_msg:    Message to send
 * @sent:       amount of data sent on socket is stored here
 *
 * Our basic "send data to server" function. Should be called with srv_mutex
 * held. The caller is responsible for handling the results.
 */
static int
smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
              size_t *sent)
{
        int rc = 0;
        int retries = 0;
        struct socket *ssocket = server->ssocket;

        *sent = 0;

        smb_msg->msg_name = (struct sockaddr *) &server->dstaddr;
        smb_msg->msg_namelen = sizeof(struct sockaddr);
        smb_msg->msg_control = NULL;
        smb_msg->msg_controllen = 0;
        if (server->noblocksnd)
                smb_msg->msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
        else
                smb_msg->msg_flags = MSG_NOSIGNAL;

        while (msg_data_left(smb_msg)) {
                /*
                 * If blocking send, we try 3 times, since each can block
                 * for 5 seconds. For nonblocking we have to try more,
                 * but wait increasing amounts of time allowing time for
                 * the socket to clear. The overall time we wait in either
                 * case to send on the socket is about 15 seconds.
                 * Similarly we wait for 15 seconds for a response from
                 * the server in SendReceive[2] for the server to send
                 * a response back for most types of requests (except
                 * SMB Write past end of file which can be slow, and
                 * blocking lock operations). NFS waits slightly longer
                 * than CIFS, but this can make it take longer for
                 * nonresponsive servers to be detected and 15 seconds
                 * is more than enough time for modern networks to
                 * send a packet. In most cases if we fail to send
                 * after the retries we will kill the socket and
                 * reconnect which may clear the network problem.
                 */
                rc = sock_sendmsg(ssocket, smb_msg);
                if (rc == -EAGAIN) {
                        retries++;
                        if (retries >= 14 ||
                            (!server->noblocksnd && (retries > 2))) {
                                cifs_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
                                         ssocket);
                                return -EAGAIN;
                        }
                        msleep(1 << retries);
                        continue;
                }

                if (rc < 0)
                        return rc;

                if (rc == 0) {
                        /*
                         * Should never happen; letting the socket clear
                         * before retrying is our only obvious option here.
                         */
                        cifs_dbg(VFS, "tcp sent no data\n");
                        msleep(500);
                        continue;
                }

                /* send was at least partially successful */
                *sent += rc;
                retries = 0; /* in case we get ENOSPC on the next send */
        }
        return 0;
}

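/*
 * smb_rqst_len - compute the on-the-wire length of a request
 * @server:     server the request will be sent to
 * @rqst:       request to measure
 *
 * Sums the kvec array and any trailing page array. For SMB2+ (no header
 * preamble) the leading 4-byte RFC1002 length vector is excluded, since
 * that marker is generated separately by __smb_send_rqst.
 */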
unsigned long
smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
        unsigned int i;
        struct kvec *iov;
        int nvec;
        unsigned long buflen = 0;

        if (server->vals->header_preamble_size == 0 &&
            rqst->rq_nvec >= 2 && rqst->rq_iov[0].iov_len == 4) {
                iov = &rqst->rq_iov[1];
                nvec = rqst->rq_nvec - 1;
        } else {
                iov = rqst->rq_iov;
                nvec = rqst->rq_nvec;
        }

        /* total up iov array first */
        for (i = 0; i < nvec; i++)
                buflen += iov[i].iov_len;

        /*
         * Add in the page array if there is one. The caller needs to make
         * sure rq_offset and rq_tailsz are set correctly. If a buffer of
         * multiple pages ends at page boundary, rq_tailsz needs to be set to
         * PAGE_SIZE.
         */
        if (rqst->rq_npages) {
                if (rqst->rq_npages == 1)
                        buflen += rqst->rq_tailsz;
                else {
                        /*
                         * If there is more than one page, calculate the
                         * buffer length based on rq_offset and rq_tailsz
                         */
                        buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
                                        rqst->rq_offset;
                        buflen += rqst->rq_tailsz;
                }
        }

        return buflen;
}

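/*
 * __smb_send_rqst - send one or more requests on the socket
 * @server:     server to send to
 * @num_rqst:   number of requests in the @rqst array
 * @rqst:       array of requests to send
 *
 * Corks the socket, emits the RFC1002 length marker for SMB2+, then sends
 * each request's kvecs and pages with all signals blocked so interruption
 * cannot cause a partial send. A partial send marks the session
 * CifsNeedReconnect so the server discards the incomplete SMB.
 */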
static int
__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
                struct smb_rqst *rqst)
{
        int rc = 0;
        struct kvec *iov;
        int n_vec;
        unsigned int send_length = 0;
        unsigned int i, j;
        sigset_t mask, oldmask;
        size_t total_len = 0, sent, size;
        struct socket *ssocket = server->ssocket;
        struct msghdr smb_msg;
        int val = 1;
        __be32 rfc1002_marker;

        if (cifs_rdma_enabled(server) && server->smbd_conn) {
                rc = smbd_send(server, rqst);
                goto smbd_done;
        }

        if (ssocket == NULL)
                return -EAGAIN;

        if (signal_pending(current)) {
                cifs_dbg(FYI, "signal is pending before sending any data\n");
                return -EINTR;
        }

        /* cork the socket */
        kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
                                (char *)&val, sizeof(val));

        for (j = 0; j < num_rqst; j++)
                send_length += smb_rqst_len(server, &rqst[j]);
        rfc1002_marker = cpu_to_be32(send_length);

        /*
         * We should not allow signals to interrupt the network send because
         * any partial send will cause session reconnects, thus increasing
         * latency of system calls and overloading a server with unnecessary
         * requests.
         */

        sigfillset(&mask);
        sigprocmask(SIG_BLOCK, &mask, &oldmask);

        /* Generate an rfc1002 marker for SMB2+ */
        if (server->vals->header_preamble_size == 0) {
                struct kvec hiov = {
                        .iov_base = &rfc1002_marker,
                        .iov_len  = 4
                };
                iov_iter_kvec(&smb_msg.msg_iter, WRITE, &hiov, 1, 4);
                rc = smb_send_kvec(server, &smb_msg, &sent);
                if (rc < 0)
                        goto unmask;

                total_len += sent;
                send_length += 4;
        }

        cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);

        for (j = 0; j < num_rqst; j++) {
                iov = rqst[j].rq_iov;
                n_vec = rqst[j].rq_nvec;

                size = 0;
                for (i = 0; i < n_vec; i++) {
                        dump_smb(iov[i].iov_base, iov[i].iov_len);
                        size += iov[i].iov_len;
                }

                iov_iter_kvec(&smb_msg.msg_iter, WRITE, iov, n_vec, size);

                rc = smb_send_kvec(server, &smb_msg, &sent);
                if (rc < 0)
                        goto unmask;

                total_len += sent;

                /* now walk the page array and send each page in it */
                for (i = 0; i < rqst[j].rq_npages; i++) {
                        struct bio_vec bvec;

                        bvec.bv_page = rqst[j].rq_pages[i];
                        rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
                                             &bvec.bv_offset);

                        iov_iter_bvec(&smb_msg.msg_iter, WRITE,
                                      &bvec, 1, bvec.bv_len);
                        rc = smb_send_kvec(server, &smb_msg, &sent);
                        if (rc < 0)
                                break;

                        total_len += sent;
                }
        }

unmask:
        sigprocmask(SIG_SETMASK, &oldmask, NULL);

        /*
         * If a signal is pending but we have already sent the whole packet to
         * the server we need to return success status to allow a corresponding
         * mid entry to be kept in the pending requests queue, thus allowing
         * the client to handle responses from the server.
         *
         * If only part of the packet has been sent there is no need to hide
         * the interrupt because the session will be reconnected anyway, so
         * there won't be any response from the server to handle.
         */

        if (signal_pending(current) && (total_len != send_length)) {
                cifs_dbg(FYI, "signal is pending after attempt to send\n");
                rc = -EINTR;
        }

        /* uncork it */
        val = 0;
        kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
                                (char *)&val, sizeof(val));

        if ((total_len > 0) && (total_len != send_length)) {
                cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
                         send_length, total_len);
                /*
                 * If we have only sent part of an SMB then the next SMB could
                 * be taken as the remainder of this one. We need to kill the
                 * socket so the server throws away the partial SMB.
                 */
                server->tcpStatus = CifsNeedReconnect;
                trace_smb3_partial_send_reconnect(server->CurrentMid,
                                                  server->hostname);
        }
smbd_done:
        if (rc < 0 && rc != -EINTR)
                cifs_dbg(VFS, "Error %d sending data on socket to server\n",
                         rc);
        else if (rc > 0)
                rc = 0;

        return rc;
}

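/*
 * smb_send_rqst - send requests, transforming (encrypting) them if needed
 *
 * Without CIFS_TRANSFORM_REQ this is a straight passthrough to
 * __smb_send_rqst. Otherwise the request chain is prefixed with an
 * smb2_transform_hdr and handed to the server's init_transform_rq hook
 * before sending.
 */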
static int
smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
              struct smb_rqst *rqst, int flags)
{
        struct kvec iov;
        struct smb2_transform_hdr tr_hdr;
        struct smb_rqst cur_rqst[MAX_COMPOUND];
        int rc;

        if (!(flags & CIFS_TRANSFORM_REQ))
                return __smb_send_rqst(server, num_rqst, rqst);

        if (num_rqst > MAX_COMPOUND - 1)
                return -ENOMEM;

        memset(&cur_rqst[0], 0, sizeof(cur_rqst));
        memset(&iov, 0, sizeof(iov));
        memset(&tr_hdr, 0, sizeof(tr_hdr));

        iov.iov_base = &tr_hdr;
        iov.iov_len = sizeof(tr_hdr);
        cur_rqst[0].rq_iov = &iov;
        cur_rqst[0].rq_nvec = 1;

        if (!server->ops->init_transform_rq) {
                cifs_dbg(VFS, "Encryption requested but transform callback is missing\n");
                return -EIO;
        }

        rc = server->ops->init_transform_rq(server, num_rqst + 1,
                                            &cur_rqst[0], rqst);
        if (rc)
                return rc;

        rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
        smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
        return rc;
}

int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
         unsigned int smb_buf_length)
{
        struct kvec iov[2];
        struct smb_rqst rqst = { .rq_iov = iov,
                                 .rq_nvec = 2 };

        iov[0].iov_base = smb_buffer;
        iov[0].iov_len = 4;
        iov[1].iov_base = (char *)smb_buffer + 4;
        iov[1].iov_len = smb_buf_length;

        return __smb_send_rqst(server, 1, &rqst);
}

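/*
 * wait_for_free_credits - block until @num_credits credits are available
 *
 * Echoes fail fast with -EAGAIN when no credits are available, and async
 * ops (e.g. oplock breaks) take a credit immediately without waiting.
 * Everyone else sleeps on request_q until enough credits arrive. On top
 * of that, once enough requests are in flight, single-credit requests
 * leave the last MAX_COMPOUND credits for compound requests so compounds
 * cannot be starved.
 */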
static int
wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
                      const int flags, unsigned int *instance)
{
        int rc;
        int *credits;
        int optype;

        optype = flags & CIFS_OP_MASK;

        *instance = 0;

        credits = server->ops->get_credits_field(server, optype);
        /* Since an echo is already inflight, no need to wait to send another */
        if (*credits <= 0 && optype == CIFS_ECHO_OP)
                return -EAGAIN;

        spin_lock(&server->req_lock);
        if ((flags & CIFS_TIMEOUT_MASK) == CIFS_ASYNC_OP) {
                /* oplock breaks must not be held up */
                server->in_flight++;
                *credits -= 1;
                *instance = server->reconnect_instance;
                spin_unlock(&server->req_lock);
                return 0;
        }

        while (1) {
                if (*credits < num_credits) {
                        spin_unlock(&server->req_lock);
                        cifs_num_waiters_inc(server);
                        rc = wait_event_killable(server->request_q,
                                has_credits(server, credits, num_credits));
                        cifs_num_waiters_dec(server);
                        if (rc)
                                return rc;
                        spin_lock(&server->req_lock);
                } else {
                        if (server->tcpStatus == CifsExiting) {
                                spin_unlock(&server->req_lock);
                                return -ENOENT;
                        }

                        /*
                         * For normal commands, reserve the last MAX_COMPOUND
                         * credits for compound requests.
                         * Otherwise these compounds could be permanently
                         * starved for credits by single-credit requests.
                         *
                         * To prevent spinning the CPU, block this thread until
                         * there are >MAX_COMPOUND credits available.
                         * But only do this if we already have a lot of
                         * credits in flight to avoid triggering this check
                         * for servers that are slow to hand out credits on
                         * new sessions.
                         */
                        if (!optype && num_credits == 1 &&
                            server->in_flight > 2 * MAX_COMPOUND &&
                            *credits <= MAX_COMPOUND) {
                                spin_unlock(&server->req_lock);
                                cifs_num_waiters_inc(server);
                                rc = wait_event_killable(server->request_q,
                                        has_credits(server, credits,
                                                    MAX_COMPOUND + 1));
                                cifs_num_waiters_dec(server);
                                if (rc)
                                        return rc;
                                spin_lock(&server->req_lock);
                                continue;
                        }

                        /*
                         * Can not count locking commands against total
                         * as they are allowed to block on server.
                         */

                        /* update # of requests on the wire to server */
                        if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
                                *credits -= num_credits;
                                server->in_flight += num_credits;
                                *instance = server->reconnect_instance;
                        }
                        spin_unlock(&server->req_lock);
                        break;
                }
        }
        return 0;
}

static int
wait_for_free_request(struct TCP_Server_Info *server, const int flags,
                      unsigned int *instance)
{
        return wait_for_free_credits(server, 1, flags, instance);
}

int
cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
                      unsigned int *num, struct cifs_credits *credits)
{
        *num = size;
        credits->value = 0;
        credits->instance = server->reconnect_instance;
        return 0;
}

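/*
 * allocate_mid - allocate a mid and queue it on pending_mid_q
 *
 * Fails with -ENOENT or -EAGAIN if the TCP or SMB session is exiting or
 * needs reconnect, except for the commands that legitimately run while a
 * session is being set up (SESSION_SETUP, NEGOTIATE) or torn down (LOGOFF).
 */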
static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
                        struct mid_q_entry **ppmidQ)
{
        if (ses->server->tcpStatus == CifsExiting) {
                return -ENOENT;
        }

        if (ses->server->tcpStatus == CifsNeedReconnect) {
                cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
                return -EAGAIN;
        }

        if (ses->status == CifsNew) {
                if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
                        (in_buf->Command != SMB_COM_NEGOTIATE))
                        return -EAGAIN;
                /* else ok - we are setting up session */
        }

        if (ses->status == CifsExiting) {
                /* check if SMB session is bad because we are setting it up */
                if (in_buf->Command != SMB_COM_LOGOFF_ANDX)
                        return -EAGAIN;
                /* else ok - we are shutting down session */
        }

        *ppmidQ = AllocMidQEntry(in_buf, ses->server);
        if (*ppmidQ == NULL)
                return -ENOMEM;
        spin_lock(&GlobalMid_Lock);
        list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
        spin_unlock(&GlobalMid_Lock);
        return 0;
}

static int
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
        int error;

        error = wait_event_freezekillable_unsafe(server->response_q,
                                    midQ->mid_state != MID_REQUEST_SUBMITTED);
        if (error < 0)
                return -ERESTARTSYS;

        return 0;
}

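/*
 * cifs_setup_async_request - allocate and sign a mid for an async request
 *
 * Expects rq_iov[0] to be the 4-byte RFC1002 length immediately followed
 * by the SMB header in rq_iov[1]; returns the signed mid or an ERR_PTR.
 */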
struct mid_q_entry *
cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
        int rc;
        struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
        struct mid_q_entry *mid;

        if (rqst->rq_iov[0].iov_len != 4 ||
            rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
                return ERR_PTR(-EIO);

        /* enable signing if server requires it */
        if (server->sign)
                hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

        mid = AllocMidQEntry(hdr, server);
        if (mid == NULL)
                return ERR_PTR(-ENOMEM);

        rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
        if (rc) {
                DeleteMidQEntry(mid);
                return ERR_PTR(rc);
        }

        return mid;
}

/*
 * Send an SMB request and set the callback function in the mid to handle
 * the result. Caller is responsible for dealing with timeouts.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
                mid_receive_t *receive, mid_callback_t *callback,
                mid_handle_t *handle, void *cbdata, const int flags,
                const struct cifs_credits *exist_credits)
{
        int rc;
        struct mid_q_entry *mid;
        struct cifs_credits credits = { .value = 0, .instance = 0 };
        unsigned int instance;
        int optype;

        optype = flags & CIFS_OP_MASK;

        if ((flags & CIFS_HAS_CREDITS) == 0) {
                rc = wait_for_free_request(server, flags, &instance);
                if (rc)
                        return rc;
                credits.value = 1;
                credits.instance = instance;
        } else
                instance = exist_credits->instance;

        mutex_lock(&server->srv_mutex);

        /*
         * We can't use credits obtained from the previous session to send this
         * request. Check if there were reconnects after we obtained credits and
         * return -EAGAIN in such cases to let callers handle it.
         */
        if (instance != server->reconnect_instance) {
                mutex_unlock(&server->srv_mutex);
                add_credits_and_wake_if(server, &credits, optype);
                return -EAGAIN;
        }

        mid = server->ops->setup_async_request(server, rqst);
        if (IS_ERR(mid)) {
                mutex_unlock(&server->srv_mutex);
                add_credits_and_wake_if(server, &credits, optype);
                return PTR_ERR(mid);
        }

        mid->receive = receive;
        mid->callback = callback;
        mid->callback_data = cbdata;
        mid->handle = handle;
        mid->mid_state = MID_REQUEST_SUBMITTED;

        /* put it on the pending_mid_q */
        spin_lock(&GlobalMid_Lock);
        list_add_tail(&mid->qhead, &server->pending_mid_q);
        spin_unlock(&GlobalMid_Lock);

        /*
         * Need to store the time in mid before calling I/O. For call_async,
         * I/O response may come back and free the mid entry on another thread.
         */
        cifs_save_when_sent(mid);
        cifs_in_send_inc(server);
        rc = smb_send_rqst(server, 1, rqst, flags);
        cifs_in_send_dec(server);

        if (rc < 0) {
                revert_current_mid(server, mid->credits);
                server->sequence_number -= 2;
                cifs_delete_mid(mid);
        }

        mutex_unlock(&server->srv_mutex);

        if (rc == 0)
                return 0;

        add_credits_and_wake_if(server, &credits, optype);
        return rc;
}

/*
 * Send an SMB request. No response info (other than the return code)
 * needs to be parsed.
 *
 * flags indicate the type of request buffer, how long to wait, and whether
 * to log the NT STATUS code (error) before mapping it to a POSIX error.
 */
int
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
                 char *in_buf, int flags)
{
        int rc;
        struct kvec iov[1];
        struct kvec rsp_iov;
        int resp_buf_type;

        iov[0].iov_base = in_buf;
        iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
        flags |= CIFS_NO_RESP;
        rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
        cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);

        return rc;
}

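/*
 * cifs_sync_mid_result - map the final mid state to an error code
 *
 * Returns 0 for MID_RESPONSE_RECEIVED, leaving the mid queued for the
 * caller to consume; otherwise translates the state to -EAGAIN, -EIO or
 * -EHOSTDOWN and frees the mid.
 */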
static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
        int rc = 0;

        cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
                 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);

        spin_lock(&GlobalMid_Lock);
        switch (mid->mid_state) {
        case MID_RESPONSE_RECEIVED:
                spin_unlock(&GlobalMid_Lock);
                return rc;
        case MID_RETRY_NEEDED:
                rc = -EAGAIN;
                break;
        case MID_RESPONSE_MALFORMED:
                rc = -EIO;
                break;
        case MID_SHUTDOWN:
                rc = -EHOSTDOWN;
                break;
        default:
                list_del_init(&mid->qhead);
                cifs_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
                         __func__, mid->mid, mid->mid_state);
                rc = -EIO;
        }
        spin_unlock(&GlobalMid_Lock);

        DeleteMidQEntry(mid);
        return rc;
}

static inline int
send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
            struct mid_q_entry *mid)
{
        return server->ops->send_cancel ?
                                server->ops->send_cancel(server, rqst, mid) : 0;
}

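/*
 * cifs_check_receive - verify a received SMB1 response
 *
 * Verifies the signature when signing is active and maps the SMB status
 * code to a POSIX error via map_smb_to_linux_error.
 */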
int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
                   bool log_error)
{
        unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;

        dump_smb(mid->resp_buf, min_t(u32, 92, len));

        /* convert the length into a more usable form */
        if (server->sign) {
                struct kvec iov[2];
                int rc = 0;
                struct smb_rqst rqst = { .rq_iov = iov,
                                         .rq_nvec = 2 };

                iov[0].iov_base = mid->resp_buf;
                iov[0].iov_len = 4;
                iov[1].iov_base = (char *)mid->resp_buf + 4;
                iov[1].iov_len = len - 4;
                /* FIXME: add code to kill session */
                rc = cifs_verify_signature(&rqst, server,
                                           mid->sequence_number);
                if (rc)
                        cifs_dbg(VFS, "SMB signature verification returned error = %d\n",
                                 rc);
        }

        /* BB special case reconnect tid and uid here? */
        return map_smb_to_linux_error(mid->resp_buf, log_error);
}

struct mid_q_entry *
cifs_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
{
        int rc;
        struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
        struct mid_q_entry *mid;

        if (rqst->rq_iov[0].iov_len != 4 ||
            rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
                return ERR_PTR(-EIO);

        rc = allocate_mid(ses, hdr, &mid);
        if (rc)
                return ERR_PTR(rc);
        rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
        if (rc) {
                cifs_delete_mid(mid);
                return ERR_PTR(rc);
        }
        return mid;
}

static void
cifs_compound_callback(struct mid_q_entry *mid)
{
        struct TCP_Server_Info *server = mid->server;
        struct cifs_credits credits;

        credits.value = server->ops->get_credits(mid);
        credits.instance = server->reconnect_instance;

        add_credits(server, &credits, mid->optype);
}

static void
cifs_compound_last_callback(struct mid_q_entry *mid)
{
        cifs_compound_callback(mid);
        cifs_wake_up_task(mid);
}

static void
cifs_cancelled_callback(struct mid_q_entry *mid)
{
        cifs_compound_callback(mid);
        DeleteMidQEntry(mid);
}

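/*
 * compound_send_recv - send a compound chain and collect the responses
 *
 * Obtains one credit per request (all from the same reconnect instance),
 * signs and sends the chain under srv_mutex, then waits for each mid.
 * Credits consumed here are returned to the server structure by the
 * per-mid callbacks as responses (or cancellations) come in.
 */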
int
compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
                   const int flags, const int num_rqst, struct smb_rqst *rqst,
                   int *resp_buf_type, struct kvec *resp_iov)
{
        int i, j, optype, rc = 0;
        struct mid_q_entry *midQ[MAX_COMPOUND];
        bool cancelled_mid[MAX_COMPOUND] = {false};
        struct cifs_credits credits[MAX_COMPOUND] = {
                { .value = 0, .instance = 0 }
        };
        unsigned int instance;
        unsigned int first_instance = 0;
        char *buf;

        optype = flags & CIFS_OP_MASK;

        for (i = 0; i < num_rqst; i++)
                resp_buf_type[i] = CIFS_NO_BUFFER;  /* no response buf yet */

        if ((ses == NULL) || (ses->server == NULL)) {
                cifs_dbg(VFS, "Null session\n");
                return -EIO;
        }

        if (ses->server->tcpStatus == CifsExiting)
                return -ENOENT;

        spin_lock(&ses->server->req_lock);
        if (ses->server->credits < num_rqst) {
                /*
                 * Return immediately if there are not enough requests in
                 * flight to bring back the missing credits, since we would
                 * otherwise likely get stuck waiting for them.
                 */
                if (ses->server->in_flight < num_rqst - ses->server->credits) {
                        spin_unlock(&ses->server->req_lock);
                        return -ENOTSUPP;
                }
        } else {
                /* enough credits to send the whole compounded request */
                ses->server->credits -= num_rqst;
                ses->server->in_flight += num_rqst;
                first_instance = ses->server->reconnect_instance;
        }
        spin_unlock(&ses->server->req_lock);

        if (first_instance) {
                cifs_dbg(FYI, "Acquired %d credits at once\n", num_rqst);
                for (i = 0; i < num_rqst; i++) {
                        credits[i].value = 1;
                        credits[i].instance = first_instance;
                }
                goto setup_rqsts;
        }

        /*
         * There are not enough credits to send the whole compound request but
         * there are requests in flight that may bring credits from the server.
         * This approach still leaves the possibility to be stuck waiting for
         * credits if the server doesn't grant credits to the outstanding
         * requests. This should be fixed by returning immediately and letting
         * a caller fall back to sequential commands instead of compounding.
         * Ensure we obtain 1 credit per request in the compound chain.
         */
        for (i = 0; i < num_rqst; i++) {
                rc = wait_for_free_request(ses->server, flags, &instance);

                if (rc == 0) {
                        credits[i].value = 1;
                        credits[i].instance = instance;
                        /*
                         * All parts of the compound chain must get credits from
                         * the same session, otherwise we may end up using more
                         * credits than the server granted. If there were
                         * reconnects in between, return -EAGAIN and let callers
                         * handle it.
                         */
                        if (i == 0)
                                first_instance = instance;
                        else if (first_instance != instance) {
                                i++;
                                rc = -EAGAIN;
                        }
                }

                if (rc) {
                        /*
                         * We haven't sent an SMB packet to the server yet but
                         * we already obtained credits for i requests in the
                         * compound chain - need to return those credits back
                         * for future use. Note that we need to call add_credits
                         * multiple times to match the way we obtained credits
                         * in the first place and to account for in flight
                         * requests correctly.
                         */
                        for (j = 0; j < i; j++)
                                add_credits(ses->server, &credits[j], optype);
                        return rc;
                }
        }

setup_rqsts:
        /*
         * Make sure that we sign in the same order that we send on this socket
         * and avoid races inside tcp sendmsg code that could cause corruption
         * of smb data.
         */

        mutex_lock(&ses->server->srv_mutex);

        /*
         * All parts of the compound chain obtained credits from the same
         * session (see the appropriate checks above). At the same time there
         * might have been reconnects after those checks but before we acquired
         * the srv_mutex. We can not use credits obtained from the previous
         * session to send this request. Check if there were reconnects after
         * we obtained credits and return -EAGAIN in such cases to let callers
         * handle it.
         */
        if (first_instance != ses->server->reconnect_instance) {
                mutex_unlock(&ses->server->srv_mutex);
                for (j = 0; j < num_rqst; j++)
                        add_credits(ses->server, &credits[j], optype);
                return -EAGAIN;
        }

        for (i = 0; i < num_rqst; i++) {
                midQ[i] = ses->server->ops->setup_request(ses, &rqst[i]);
                if (IS_ERR(midQ[i])) {
                        revert_current_mid(ses->server, i);
                        for (j = 0; j < i; j++)
                                cifs_delete_mid(midQ[j]);
                        mutex_unlock(&ses->server->srv_mutex);

                        /* Update # of requests on wire to server */
                        for (j = 0; j < num_rqst; j++)
                                add_credits(ses->server, &credits[j], optype);
                        return PTR_ERR(midQ[i]);
                }

                midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
                midQ[i]->optype = optype;
                /*
                 * Invoke callback for every part of the compound chain
                 * to calculate credits properly. Wake up this thread only when
                 * the last element is received.
                 */
                if (i < num_rqst - 1)
                        midQ[i]->callback = cifs_compound_callback;
                else
                        midQ[i]->callback = cifs_compound_last_callback;
        }
        cifs_in_send_inc(ses->server);
        rc = smb_send_rqst(ses->server, num_rqst, rqst, flags);
        cifs_in_send_dec(ses->server);

        for (i = 0; i < num_rqst; i++)
                cifs_save_when_sent(midQ[i]);

        if (rc < 0) {
                revert_current_mid(ses->server, num_rqst);
                ses->server->sequence_number -= 2;
        }

        mutex_unlock(&ses->server->srv_mutex);

        if (rc < 0) {
                /* Sending failed for some reason - return credits back */
                for (i = 0; i < num_rqst; i++)
                        add_credits(ses->server, &credits[i], optype);
                goto out;
        }

        /*
         * At this point the request is passed to the network stack - we assume
         * that any credits taken from the server structure on the client have
         * been spent and we can't return them back. Once we receive responses
         * we will collect credits granted by the server in the mid callbacks
         * and add those credits to the server structure.
         */

        /*
         * Compounding is never used during session establish.
         */
        if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP))
                smb311_update_preauth_hash(ses, rqst[0].rq_iov,
                                           rqst[0].rq_nvec);

        if ((flags & CIFS_TIMEOUT_MASK) == CIFS_ASYNC_OP)
                goto out;

        for (i = 0; i < num_rqst; i++) {
                rc = wait_for_response(ses->server, midQ[i]);
                if (rc != 0)
                        break;
        }
        if (rc != 0) {
                for (; i < num_rqst; i++) {
                        cifs_dbg(VFS, "Cancelling wait for mid %llu cmd: %d\n",
                                 midQ[i]->mid, le16_to_cpu(midQ[i]->command));
                        send_cancel(ses->server, &rqst[i], midQ[i]);
                        spin_lock(&GlobalMid_Lock);
                        if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
                                midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
                                midQ[i]->callback = cifs_cancelled_callback;
                                cancelled_mid[i] = true;
                                credits[i].value = 0;
                        }
                        spin_unlock(&GlobalMid_Lock);
                }
        }

        for (i = 0; i < num_rqst; i++) {
                if (rc < 0)
                        goto out;

                rc = cifs_sync_mid_result(midQ[i], ses->server);
                if (rc != 0) {
                        /* mark this mid as cancelled to not free it below */
                        cancelled_mid[i] = true;
                        goto out;
                }

                if (!midQ[i]->resp_buf ||
                    midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
                        rc = -EIO;
                        cifs_dbg(FYI, "Bad MID state?\n");
                        goto out;
                }

                buf = (char *)midQ[i]->resp_buf;
                resp_iov[i].iov_base = buf;
                resp_iov[i].iov_len = midQ[i]->resp_buf_size +
                        ses->server->vals->header_preamble_size;

                if (midQ[i]->large_buf)
                        resp_buf_type[i] = CIFS_LARGE_BUFFER;
                else
                        resp_buf_type[i] = CIFS_SMALL_BUFFER;

                rc = ses->server->ops->check_receive(midQ[i], ses->server,
                                                     flags & CIFS_LOG_ERROR);

                /* mark it so buf will not be freed by cifs_delete_mid */
                if ((flags & CIFS_NO_RESP) == 0)
                        midQ[i]->resp_buf = NULL;

        }

        /*
         * Compounding is never used during session establish.
         */
        if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
                struct kvec iov = {
                        .iov_base = resp_iov[0].iov_base,
                        .iov_len = resp_iov[0].iov_len
                };
                smb311_update_preauth_hash(ses, &iov, 1);
        }

out:
        /*
         * This will dequeue all mids. After this it is important that the
         * demultiplex_thread will not process any of these mids any further.
         * This is prevented above by using a noop callback that will not
         * wake this thread except for the very last PDU.
         */
        for (i = 0; i < num_rqst; i++) {
                if (!cancelled_mid[i])
                        cifs_delete_mid(midQ[i]);
        }

        return rc;
}

int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
               struct smb_rqst *rqst, int *resp_buf_type, const int flags,
               struct kvec *resp_iov)
{
        return compound_send_recv(xid, ses, flags, 1, rqst, resp_buf_type,
                                  resp_iov);
}

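/*
 * SendReceive2 - legacy entry point taking a bare kvec array
 *
 * Prepends a kvec for the RFC1001 length (split off the first 4 bytes of
 * iov[0]) and forwards to cifs_send_recv. Allocates a larger iov array on
 * the heap when n_vec + 1 exceeds CIFS_MAX_IOV_SIZE.
 */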
int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
             struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
             const int flags, struct kvec *resp_iov)
{
        struct smb_rqst rqst;
        struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
        int rc;

        if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
                new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
                                        GFP_KERNEL);
                if (!new_iov) {
                        /* otherwise cifs_send_recv below sets resp_buf_type */
                        *resp_buf_type = CIFS_NO_BUFFER;
                        return -ENOMEM;
                }
        } else
                new_iov = s_iov;

        /* 1st iov is an RFC1001 length followed by the rest of the packet */
        memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));

        new_iov[0].iov_base = new_iov[1].iov_base;
        new_iov[0].iov_len = 4;
        new_iov[1].iov_base += 4;
        new_iov[1].iov_len -= 4;

        memset(&rqst, 0, sizeof(struct smb_rqst));
        rqst.rq_iov = new_iov;
        rqst.rq_nvec = n_vec + 1;

        rc = cifs_send_recv(xid, ses, &rqst, resp_buf_type, flags, resp_iov);
        if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
                kfree(new_iov);
        return rc;
}

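/*
 * SendReceive - synchronous send/receive for SMB1-style flat buffers
 *
 * Takes one credit, queues and signs a mid, sends @in_buf, then sleeps
 * until the response arrives and copies it into @out_buf. Credits are
 * returned and the mid deleted on all exit paths.
 */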
int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
            struct smb_hdr *in_buf, struct smb_hdr *out_buf,
            int *pbytes_returned, const int flags)
{
        int rc = 0;
        struct mid_q_entry *midQ;
        unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
        struct kvec iov = { .iov_base = in_buf, .iov_len = len };
        struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
        struct cifs_credits credits = { .value = 1, .instance = 0 };

        if (ses == NULL) {
                cifs_dbg(VFS, "Null smb session\n");
                return -EIO;
        }
        if (ses->server == NULL) {
                cifs_dbg(VFS, "Null tcp session\n");
                return -EIO;
        }

        if (ses->server->tcpStatus == CifsExiting)
                return -ENOENT;

        /*
         * Ensure that we do not send more than 50 overlapping requests
         * to the same server. We may make this configurable later or
         * use ses->maxReq.
         */

        if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
                cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
                         len);
                return -EIO;
        }

        rc = wait_for_free_request(ses->server, flags, &credits.instance);
        if (rc)
                return rc;

        /*
         * Make sure that we sign in the same order that we send on this socket
         * and avoid races inside tcp sendmsg code that could cause corruption
         * of smb data.
         */

        mutex_lock(&ses->server->srv_mutex);

        rc = allocate_mid(ses, in_buf, &midQ);
        if (rc) {
                mutex_unlock(&ses->server->srv_mutex);
                /* Update # of requests on wire to server */
                add_credits(ses->server, &credits, 0);
                return rc;
        }

        rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
        if (rc) {
                mutex_unlock(&ses->server->srv_mutex);
                goto out;
        }

        midQ->mid_state = MID_REQUEST_SUBMITTED;

        cifs_in_send_inc(ses->server);
        rc = smb_send(ses->server, in_buf, len);
        cifs_in_send_dec(ses->server);
        cifs_save_when_sent(midQ);

        if (rc < 0)
                ses->server->sequence_number -= 2;

        mutex_unlock(&ses->server->srv_mutex);

        if (rc < 0)
                goto out;

        if ((flags & CIFS_TIMEOUT_MASK) == CIFS_ASYNC_OP)
                goto out;

        rc = wait_for_response(ses->server, midQ);
        if (rc != 0) {
                send_cancel(ses->server, &rqst, midQ);
                spin_lock(&GlobalMid_Lock);
                if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
                        /* no longer considered to be "in-flight" */
                        midQ->callback = DeleteMidQEntry;
                        spin_unlock(&GlobalMid_Lock);
                        add_credits(ses->server, &credits, 0);
                        return rc;
                }
                spin_unlock(&GlobalMid_Lock);
        }

        rc = cifs_sync_mid_result(midQ, ses->server);
        if (rc != 0) {
                add_credits(ses->server, &credits, 0);
                return rc;
        }

        if (!midQ->resp_buf || !out_buf ||
            midQ->mid_state != MID_RESPONSE_RECEIVED) {
                rc = -EIO;
                cifs_dbg(VFS, "Bad MID state?\n");
                goto out;
        }

        *pbytes_returned = get_rfc1002_length(midQ->resp_buf);
        memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
        rc = cifs_check_receive(midQ, ses->server, 0);
out:
        cifs_delete_mid(midQ);
        add_credits(ses->server, &credits, 0);

        return rc;
}

/*
 * We send a LOCKINGX_CANCEL_LOCK to cause the Windows
 * blocking lock to return.
 */
static int
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
                        struct smb_hdr *in_buf,
                        struct smb_hdr *out_buf)
{
        int bytes_returned;
        struct cifs_ses *ses = tcon->ses;
        LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;

        /*
         * We just modify the current in_buf to change
         * the type of lock from LOCKING_ANDX_SHARED_LOCK
         * or LOCKING_ANDX_EXCLUSIVE_LOCK to
         * LOCKING_ANDX_CANCEL_LOCK.
         */
        pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
        pSMB->Timeout = 0;
        pSMB->hdr.Mid = get_next_mid(ses->server);

        return SendReceive(xid, ses, in_buf, out_buf,
                        &bytes_returned, 0);
}

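/*
 * SendReceiveBlockingLock - send a blocking lock request
 *
 * Like SendReceive but waits interruptibly: if a signal arrives while the
 * lock is pending, a cancel (NT_CANCEL or LOCKINGX_CANCEL_LOCK) is sent so
 * the server releases the blocked request, and -ERESTARTSYS is returned
 * in place of -EACCES so the system call is restarted.
 */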
int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
            struct smb_hdr *in_buf, struct smb_hdr *out_buf,
            int *pbytes_returned)
{
        int rc = 0;
        int rstart = 0;
        struct mid_q_entry *midQ;
        struct cifs_ses *ses;
        unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
        struct kvec iov = { .iov_base = in_buf, .iov_len = len };
        struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
        unsigned int instance;

        if (tcon == NULL || tcon->ses == NULL) {
                cifs_dbg(VFS, "Null smb session\n");
                return -EIO;
        }
        ses = tcon->ses;

        if (ses->server == NULL) {
                cifs_dbg(VFS, "Null tcp session\n");
                return -EIO;
        }

        if (ses->server->tcpStatus == CifsExiting)
                return -ENOENT;

        /*
         * Ensure that we do not send more than 50 overlapping requests
         * to the same server. We may make this configurable later or
         * use ses->maxReq.
         */

        if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
                cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
                         len);
                return -EIO;
        }

        rc = wait_for_free_request(ses->server, CIFS_BLOCKING_OP, &instance);
        if (rc)
                return rc;

        /*
         * Make sure that we sign in the same order that we send on this socket
         * and avoid races inside tcp sendmsg code that could cause corruption
         * of smb data.
         */

        mutex_lock(&ses->server->srv_mutex);

        rc = allocate_mid(ses, in_buf, &midQ);
        if (rc) {
                mutex_unlock(&ses->server->srv_mutex);
                return rc;
        }

        rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
        if (rc) {
                cifs_delete_mid(midQ);
                mutex_unlock(&ses->server->srv_mutex);
                return rc;
        }

        midQ->mid_state = MID_REQUEST_SUBMITTED;
        cifs_in_send_inc(ses->server);
        rc = smb_send(ses->server, in_buf, len);
        cifs_in_send_dec(ses->server);
        cifs_save_when_sent(midQ);

        if (rc < 0)
                ses->server->sequence_number -= 2;

        mutex_unlock(&ses->server->srv_mutex);

        if (rc < 0) {
                cifs_delete_mid(midQ);
                return rc;
        }

        /* Wait for a reply - allow signals to interrupt. */
        rc = wait_event_interruptible(ses->server->response_q,
                (!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
                ((ses->server->tcpStatus != CifsGood) &&
                 (ses->server->tcpStatus != CifsNew)));

        /* Were we interrupted by a signal? */
        if ((rc == -ERESTARTSYS) &&
                (midQ->mid_state == MID_REQUEST_SUBMITTED) &&
                ((ses->server->tcpStatus == CifsGood) ||
                 (ses->server->tcpStatus == CifsNew))) {

                if (in_buf->Command == SMB_COM_TRANSACTION2) {
                        /*
                         * POSIX lock. We send an NT_CANCEL SMB to cause the
                         * blocking lock to return.
                         */
                        rc = send_cancel(ses->server, &rqst, midQ);
                        if (rc) {
                                cifs_delete_mid(midQ);
                                return rc;
                        }
                } else {
                        /*
                         * Windows lock. We send a LOCKINGX_CANCEL_LOCK
                         * to cause the blocking lock to return.
                         */
                        rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

                        /*
                         * If we get -ENOLCK back the lock may have
                         * already been removed. Don't exit in this case.
                         */
                        if (rc && rc != -ENOLCK) {
                                cifs_delete_mid(midQ);
                                return rc;
                        }
                }

                rc = wait_for_response(ses->server, midQ);
                if (rc) {
                        send_cancel(ses->server, &rqst, midQ);
                        spin_lock(&GlobalMid_Lock);
                        if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
                                /* no longer considered to be "in-flight" */
                                midQ->callback = DeleteMidQEntry;
                                spin_unlock(&GlobalMid_Lock);
                                return rc;
                        }
                        spin_unlock(&GlobalMid_Lock);
                }

                /* We got the response - restart system call. */
                rstart = 1;
        }

        rc = cifs_sync_mid_result(midQ, ses->server);
        if (rc != 0)
                return rc;

        /* rcvd frame is ok */
        if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
                rc = -EIO;
                cifs_dbg(VFS, "Bad MID state?\n");
                goto out;
        }

        *pbytes_returned = get_rfc1002_length(midQ->resp_buf);
        memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
        rc = cifs_check_receive(midQ, ses->server, 0);
out:
        cifs_delete_mid(midQ);
        if (rstart && rc == -EACCES)
                return -ERESTARTSYS;
        return rc;
}