cifs: add a timeout argument to wait_for_free_credits
/*
 *   fs/cifs/transport.c
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2008
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *   Jeremy Allison (jra@samba.org) 2006.
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/wait.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/tcp.h>
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>
#include <asm/processor.h>
#include <linux/mempool.h>
#include <linux/signal.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smb2proto.h"
#include "smbdirect.h"

/* Max number of iovectors we can use off the stack when sending requests. */
#define CIFS_MAX_IOV_SIZE 8

void
cifs_wake_up_task(struct mid_q_entry *mid)
{
        wake_up_process(mid->callback_data);
}

struct mid_q_entry *
AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
        struct mid_q_entry *temp;

        if (server == NULL) {
                cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
                return NULL;
        }

        temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
        memset(temp, 0, sizeof(struct mid_q_entry));
        kref_init(&temp->refcount);
        temp->mid = get_mid(smb_buffer);
        temp->pid = current->pid;
        temp->command = cpu_to_le16(smb_buffer->Command);
        cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
        /*      do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
        /* when mid allocated can be before when sent */
        temp->when_alloc = jiffies;
        temp->server = server;

        /*
         * The default is for the mid to be synchronous, so the
         * default callback just wakes up the current task.
         */
        temp->callback = cifs_wake_up_task;
        temp->callback_data = current;

        atomic_inc(&midCount);
        temp->mid_state = MID_REQUEST_ALLOCATED;
        return temp;
}

static void _cifs_mid_q_entry_release(struct kref *refcount)
{
        struct mid_q_entry *mid = container_of(refcount, struct mid_q_entry,
                                               refcount);

        mempool_free(mid, cifs_mid_poolp);
}

void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
{
        spin_lock(&GlobalMid_Lock);
        kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
        spin_unlock(&GlobalMid_Lock);
}

void
DeleteMidQEntry(struct mid_q_entry *midEntry)
{
#ifdef CONFIG_CIFS_STATS2
        __le16 command = midEntry->server->vals->lock_cmd;
        unsigned long now;
#endif
        midEntry->mid_state = MID_FREE;
        atomic_dec(&midCount);
        if (midEntry->large_buf)
                cifs_buf_release(midEntry->resp_buf);
        else
                cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
        now = jiffies;
        /*
         * commands taking longer than one second (default) can be indications
         * that something is wrong, unless it is quite a slow link or a very
         * busy server. Note that this calc is unlikely or impossible to wrap
         * as long as slow_rsp_threshold is not set way above recommended max
         * value (32767 ie 9 hours) and is generally harmless even if wrong
         * since only affects debug counters - so leaving the calc as simple
         * comparison rather than doing multiple conversions and overflow
         * checks
         */
        if ((slow_rsp_threshold != 0) &&
            time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
            (midEntry->command != command)) {
                /*
                 * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
                 * NB: le16_to_cpu returns unsigned so can not be negative below
                 */
                if (le16_to_cpu(midEntry->command) < NUMBER_OF_SMB2_COMMANDS)
                        cifs_stats_inc(&midEntry->server->smb2slowcmd[le16_to_cpu(midEntry->command)]);

                trace_smb3_slow_rsp(le16_to_cpu(midEntry->command),
                               midEntry->mid, midEntry->pid,
                               midEntry->when_sent, midEntry->when_received);
                if (cifsFYI & CIFS_TIMER) {
                        pr_debug(" CIFS slow rsp: cmd %d mid %llu",
                               midEntry->command, midEntry->mid);
                        cifs_info(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
                               now - midEntry->when_alloc,
                               now - midEntry->when_sent,
                               now - midEntry->when_received);
                }
        }
#endif
        cifs_mid_q_entry_release(midEntry);
}

void
cifs_delete_mid(struct mid_q_entry *mid)
{
        spin_lock(&GlobalMid_Lock);
        list_del_init(&mid->qhead);
        mid->mid_flags |= MID_DELETED;
        spin_unlock(&GlobalMid_Lock);

        DeleteMidQEntry(mid);
}

/*
 * smb_send_kvec - send an array of kvecs to the server
 * @server:     Server to send the data to
 * @smb_msg:    Message to send
 * @sent:       amount of data sent on socket is stored here
 *
 * Our basic "send data to server" function. Should be called with srv_mutex
 * held. The caller is responsible for handling the results.
 */
static int
smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
              size_t *sent)
{
        int rc = 0;
        int retries = 0;
        struct socket *ssocket = server->ssocket;

        *sent = 0;

        smb_msg->msg_name = (struct sockaddr *) &server->dstaddr;
        smb_msg->msg_namelen = sizeof(struct sockaddr);
        smb_msg->msg_control = NULL;
        smb_msg->msg_controllen = 0;
        if (server->noblocksnd)
                smb_msg->msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
        else
                smb_msg->msg_flags = MSG_NOSIGNAL;

        while (msg_data_left(smb_msg)) {
                /*
                 * If blocking send, we try 3 times, since each can block
                 * for 5 seconds. For nonblocking we have to try more
                 * but wait increasing amounts of time allowing time for
                 * socket to clear.  The overall time we wait in either
                 * case to send on the socket is about 15 seconds.
                 * Similarly we wait for 15 seconds for a response from
                 * the server in SendReceive[2] for the server to send
                 * a response back for most types of requests (except
                 * SMB Write past end of file which can be slow, and
                 * blocking lock operations). NFS waits slightly longer
                 * than CIFS, but this can make it take longer for
                 * nonresponsive servers to be detected and 15 seconds
                 * is more than enough time for modern networks to
                 * send a packet.  In most cases if we fail to send
                 * after the retries we will kill the socket and
                 * reconnect which may clear the network problem.
                 */
                rc = sock_sendmsg(ssocket, smb_msg);
                if (rc == -EAGAIN) {
                        retries++;
                        if (retries >= 14 ||
                            (!server->noblocksnd && (retries > 2))) {
                                cifs_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
                                         ssocket);
                                return -EAGAIN;
                        }
                        msleep(1 << retries);
                        continue;
                }

                if (rc < 0)
                        return rc;

                if (rc == 0) {
                        /* should never happen, letting socket clear before
                           retrying is our only obvious option here */
                        cifs_dbg(VFS, "tcp sent no data\n");
                        msleep(500);
                        continue;
                }

                /* send was at least partially successful */
                *sent += rc;
                retries = 0; /* in case we get ENOSPC on the next send */
        }
        return 0;
}
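
/*
 * Illustrative sketch (hypothetical helper, not built): worst-case backoff
 * of the -EAGAIN retry loop above for a nonblocking send. Each retry sleeps
 * 1 << retries ms and the 14th retry gives up, so the total sleep is
 * 2 + 4 + ... + 8192 ms, roughly the "15 seconds" the comment describes.
 */
#if 0
static unsigned int smb_send_max_backoff_ms(void)
{
        unsigned int retries, total = 0;

        for (retries = 1; retries < 14; retries++)
                total += 1U << retries; /* mirrors msleep(1 << retries) */
        return total;                   /* 16382 ms */
}
#endif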

unsigned long
smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
        unsigned int i;
        struct kvec *iov;
        int nvec;
        unsigned long buflen = 0;

        if (server->vals->header_preamble_size == 0 &&
            rqst->rq_nvec >= 2 && rqst->rq_iov[0].iov_len == 4) {
                iov = &rqst->rq_iov[1];
                nvec = rqst->rq_nvec - 1;
        } else {
                iov = rqst->rq_iov;
                nvec = rqst->rq_nvec;
        }

        /* total up iov array first */
        for (i = 0; i < nvec; i++)
                buflen += iov[i].iov_len;

        /*
         * Add in the page array if there is one. The caller needs to make
         * sure rq_offset and rq_tailsz are set correctly. If a buffer of
         * multiple pages ends at page boundary, rq_tailsz needs to be set to
         * PAGE_SIZE.
         */
        if (rqst->rq_npages) {
                if (rqst->rq_npages == 1)
                        buflen += rqst->rq_tailsz;
                else {
                        /*
                         * If there is more than one page, calculate the
                         * buffer length based on rq_offset and rq_tailsz
                         */
                        buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
                                        rqst->rq_offset;
                        buflen += rqst->rq_tailsz;
                }
        }

        return buflen;
}
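
/*
 * Illustrative sketch (hypothetical helper, not built): the page-array
 * arithmetic above with assumed example values. A buffer that starts
 * rq_offset bytes into the first of three 4096-byte pages and ends
 * rq_tailsz bytes into the last contributes
 * (4096 - 512) + 4096 + 1024 = 8704 bytes.
 */
#if 0
static unsigned long example_rqst_page_len(void)
{
        unsigned int rq_npages = 3, rq_pagesz = 4096;   /* example values */
        unsigned int rq_offset = 512, rq_tailsz = 1024;

        return rq_pagesz * (rq_npages - 1) - rq_offset + rq_tailsz;
}
#endif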

static int
__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
                struct smb_rqst *rqst)
{
        int rc = 0;
        struct kvec *iov;
        int n_vec;
        unsigned int send_length = 0;
        unsigned int i, j;
        sigset_t mask, oldmask;
        size_t total_len = 0, sent, size;
        struct socket *ssocket = server->ssocket;
        struct msghdr smb_msg;
        int val = 1;
        __be32 rfc1002_marker;

        if (cifs_rdma_enabled(server) && server->smbd_conn) {
                rc = smbd_send(server, rqst);
                goto smbd_done;
        }

        if (ssocket == NULL)
                return -EAGAIN;

        if (signal_pending(current)) {
                cifs_dbg(FYI, "signal is pending before sending any data\n");
                return -EINTR;
        }

        /* cork the socket */
        kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
                                (char *)&val, sizeof(val));

        for (j = 0; j < num_rqst; j++)
                send_length += smb_rqst_len(server, &rqst[j]);
        rfc1002_marker = cpu_to_be32(send_length);

        /*
         * We should not allow signals to interrupt the network send because
         * any partial send will cause session reconnects thus increasing
         * latency of system calls and overload a server with unnecessary
         * requests.
         */

        sigfillset(&mask);
        sigprocmask(SIG_BLOCK, &mask, &oldmask);

        /* Generate a rfc1002 marker for SMB2+ */
        if (server->vals->header_preamble_size == 0) {
                struct kvec hiov = {
                        .iov_base = &rfc1002_marker,
                        .iov_len  = 4
                };
                iov_iter_kvec(&smb_msg.msg_iter, WRITE, &hiov, 1, 4);
                rc = smb_send_kvec(server, &smb_msg, &sent);
                if (rc < 0)
                        goto unmask;

                total_len += sent;
                send_length += 4;
        }

        cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);

        for (j = 0; j < num_rqst; j++) {
                iov = rqst[j].rq_iov;
                n_vec = rqst[j].rq_nvec;

                size = 0;
                for (i = 0; i < n_vec; i++) {
                        dump_smb(iov[i].iov_base, iov[i].iov_len);
                        size += iov[i].iov_len;
                }

                iov_iter_kvec(&smb_msg.msg_iter, WRITE, iov, n_vec, size);

                rc = smb_send_kvec(server, &smb_msg, &sent);
                if (rc < 0)
                        goto unmask;

                total_len += sent;

                /* now walk the page array and send each page in it */
                for (i = 0; i < rqst[j].rq_npages; i++) {
                        struct bio_vec bvec;

                        bvec.bv_page = rqst[j].rq_pages[i];
                        rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
                                             &bvec.bv_offset);

                        iov_iter_bvec(&smb_msg.msg_iter, WRITE,
                                      &bvec, 1, bvec.bv_len);
                        rc = smb_send_kvec(server, &smb_msg, &sent);
                        if (rc < 0)
                                break;

                        total_len += sent;
                }
        }

unmask:
        sigprocmask(SIG_SETMASK, &oldmask, NULL);

        /*
         * If signal is pending but we have already sent the whole packet to
         * the server we need to return success status to allow a corresponding
         * mid entry to be kept in the pending requests queue thus allowing
         * to handle responses from the server by the client.
         *
         * If only part of the packet has been sent there is no need to hide
         * interrupt because the session will be reconnected anyway, so there
         * won't be any response from the server to handle.
         */

        if (signal_pending(current) && (total_len != send_length)) {
                cifs_dbg(FYI, "signal is pending after attempt to send\n");
                rc = -EINTR;
        }

        /* uncork it */
        val = 0;
        kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
                                (char *)&val, sizeof(val));

        if ((total_len > 0) && (total_len != send_length)) {
                cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
                         send_length, total_len);
                /*
                 * If we have only sent part of an SMB then the next SMB could
                 * be taken as the remainder of this one. We need to kill the
                 * socket so the server throws away the partial SMB
                 */
                server->tcpStatus = CifsNeedReconnect;
                trace_smb3_partial_send_reconnect(server->CurrentMid,
                                                  server->hostname);
        }
smbd_done:
        if (rc < 0 && rc != -EINTR)
                cifs_dbg(VFS, "Error %d sending data on socket to server\n",
                         rc);
        else if (rc > 0)
                rc = 0;

        return rc;
}
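
/*
 * Illustrative sketch (hypothetical helper, not built): the 4-byte
 * RFC 1002 style marker that __smb_send_rqst() prepends for SMB2+ is
 * simply the total payload length in network byte order, covering
 * everything after the marker itself.
 */
#if 0
static void example_rfc1002_marker(unsigned char marker[4],
                                   unsigned int send_length)
{
        /* equivalent to the cpu_to_be32(send_length) used above */
        marker[0] = (send_length >> 24) & 0xff;
        marker[1] = (send_length >> 16) & 0xff;
        marker[2] = (send_length >> 8) & 0xff;
        marker[3] = send_length & 0xff;
}
#endif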

static int
smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
              struct smb_rqst *rqst, int flags)
{
        struct kvec iov;
        struct smb2_transform_hdr tr_hdr;
        struct smb_rqst cur_rqst[MAX_COMPOUND];
        int rc;

        if (!(flags & CIFS_TRANSFORM_REQ))
                return __smb_send_rqst(server, num_rqst, rqst);

        if (num_rqst > MAX_COMPOUND - 1)
                return -ENOMEM;

        memset(&cur_rqst[0], 0, sizeof(cur_rqst));
        memset(&iov, 0, sizeof(iov));
        memset(&tr_hdr, 0, sizeof(tr_hdr));

        iov.iov_base = &tr_hdr;
        iov.iov_len = sizeof(tr_hdr);
        cur_rqst[0].rq_iov = &iov;
        cur_rqst[0].rq_nvec = 1;

        if (!server->ops->init_transform_rq) {
                cifs_dbg(VFS, "Encryption requested but transform callback is missing\n");
                return -EIO;
        }

        rc = server->ops->init_transform_rq(server, num_rqst + 1,
                                            &cur_rqst[0], rqst);
        if (rc)
                return rc;

        rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
        smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
        return rc;
}
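
/*
 * Illustrative sketch (not built): for an encrypted compound of N requests,
 * smb_send_rqst() hands __smb_send_rqst() num_rqst + 1 entries laid out as
 *
 *   cur_rqst[0]    - one kvec wrapping the smb2_transform_hdr
 *   cur_rqst[1..N] - the transformed requests filled in by
 *                    server->ops->init_transform_rq()
 *
 * which is why num_rqst is capped at MAX_COMPOUND - 1 above.
 */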

int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
         unsigned int smb_buf_length)
{
        struct kvec iov[2];
        struct smb_rqst rqst = { .rq_iov = iov,
                                 .rq_nvec = 2 };

        iov[0].iov_base = smb_buffer;
        iov[0].iov_len = 4;
        iov[1].iov_base = (char *)smb_buffer + 4;
        iov[1].iov_len = smb_buf_length;

        return __smb_send_rqst(server, 1, &rqst);
}

static int
wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
                      const int timeout, const int flags,
                      unsigned int *instance)
{
        int rc;
        int *credits;
        int optype;
        long int t;

        if (timeout < 0)
                t = MAX_JIFFY_OFFSET;
        else
                t = msecs_to_jiffies(timeout);

        optype = flags & CIFS_OP_MASK;

        *instance = 0;

        credits = server->ops->get_credits_field(server, optype);
        /* Since an echo is already inflight, no need to wait to send another */
        if (*credits <= 0 && optype == CIFS_ECHO_OP)
                return -EAGAIN;

        spin_lock(&server->req_lock);
        if ((flags & CIFS_TIMEOUT_MASK) == CIFS_ASYNC_OP) {
                /* oplock breaks must not be held up */
                server->in_flight++;
                *credits -= 1;
                *instance = server->reconnect_instance;
                spin_unlock(&server->req_lock);
                return 0;
        }

        while (1) {
                if (*credits < num_credits) {
                        spin_unlock(&server->req_lock);
                        cifs_num_waiters_inc(server);
                        rc = wait_event_killable_timeout(server->request_q,
                                has_credits(server, credits, num_credits), t);
                        cifs_num_waiters_dec(server);
                        if (!rc) {
                                cifs_dbg(VFS, "wait timed out after %d ms\n",
                                         timeout);
                                return -ENOTSUPP;
                        }
                        if (rc == -ERESTARTSYS)
                                return -ERESTARTSYS;
                        spin_lock(&server->req_lock);
                } else {
                        if (server->tcpStatus == CifsExiting) {
                                spin_unlock(&server->req_lock);
                                return -ENOENT;
                        }

                        /*
                         * For normal commands, reserve the last MAX_COMPOUND
                         * credits to compound requests.
                         * Otherwise these compounds could be permanently
                         * starved for credits by single-credit requests.
                         *
                         * To prevent spinning CPU, block this thread until
                         * there are >MAX_COMPOUND credits available.
                         * But only do this if we already have a lot of
                         * credits in flight to avoid triggering this check
                         * for servers that are slow to hand out credits on
                         * new sessions.
                         */
                        if (!optype && num_credits == 1 &&
                            server->in_flight > 2 * MAX_COMPOUND &&
                            *credits <= MAX_COMPOUND) {
                                spin_unlock(&server->req_lock);
                                cifs_num_waiters_inc(server);
                                rc = wait_event_killable_timeout(
                                        server->request_q,
                                        has_credits(server, credits,
                                                    MAX_COMPOUND + 1),
                                        t);
                                cifs_num_waiters_dec(server);
                                if (!rc) {
                                        cifs_dbg(VFS, "wait timed out after %d ms\n",
                                                 timeout);
                                        return -ENOTSUPP;
                                }
                                if (rc == -ERESTARTSYS)
                                        return -ERESTARTSYS;
                                spin_lock(&server->req_lock);
                                continue;
                        }

                        /*
                         * Can not count locking commands against total
                         * as they are allowed to block on server.
                         */

                        /* update # of requests on the wire to server */
                        if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
                                *credits -= num_credits;
                                server->in_flight += num_credits;
                                *instance = server->reconnect_instance;
                        }
                        spin_unlock(&server->req_lock);
                        break;
                }
        }
        return 0;
}
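
/*
 * Illustrative sketch (hypothetical helper, not built): how the new timeout
 * argument maps to the jiffies value used by the killable waits above. A
 * negative timeout means wait indefinitely, which is what
 * wait_for_free_request() asks for by passing -1; any other value is taken
 * as milliseconds.
 */
#if 0
static long example_credit_wait_jiffies(int timeout_ms)
{
        if (timeout_ms < 0)
                return MAX_JIFFY_OFFSET;        /* effectively forever */
        return msecs_to_jiffies(timeout_ms);
}
#endif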

static int
wait_for_free_request(struct TCP_Server_Info *server, const int flags,
                      unsigned int *instance)
{
        return wait_for_free_credits(server, 1, -1, flags,
                                     instance);
}

int
cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
                      unsigned int *num, struct cifs_credits *credits)
{
        *num = size;
        credits->value = 0;
        credits->instance = server->reconnect_instance;
        return 0;
}

static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
                        struct mid_q_entry **ppmidQ)
{
        if (ses->server->tcpStatus == CifsExiting) {
                return -ENOENT;
        }

        if (ses->server->tcpStatus == CifsNeedReconnect) {
                cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
                return -EAGAIN;
        }

        if (ses->status == CifsNew) {
                if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
                        (in_buf->Command != SMB_COM_NEGOTIATE))
                        return -EAGAIN;
                /* else ok - we are setting up session */
        }

        if (ses->status == CifsExiting) {
                /* check if SMB session is bad because we are setting it up */
                if (in_buf->Command != SMB_COM_LOGOFF_ANDX)
                        return -EAGAIN;
                /* else ok - we are shutting down session */
        }

        *ppmidQ = AllocMidQEntry(in_buf, ses->server);
        if (*ppmidQ == NULL)
                return -ENOMEM;
        spin_lock(&GlobalMid_Lock);
        list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
        spin_unlock(&GlobalMid_Lock);
        return 0;
}

static int
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
        int error;

        error = wait_event_freezekillable_unsafe(server->response_q,
                                    midQ->mid_state != MID_REQUEST_SUBMITTED);
        if (error < 0)
                return -ERESTARTSYS;

        return 0;
}

struct mid_q_entry *
cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
        int rc;
        struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
        struct mid_q_entry *mid;

        if (rqst->rq_iov[0].iov_len != 4 ||
            rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
                return ERR_PTR(-EIO);

        /* enable signing if server requires it */
        if (server->sign)
                hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

        mid = AllocMidQEntry(hdr, server);
        if (mid == NULL)
                return ERR_PTR(-ENOMEM);

        rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
        if (rc) {
                DeleteMidQEntry(mid);
                return ERR_PTR(rc);
        }

        return mid;
}

/*
 * Send a SMB request and set the callback function in the mid to handle
 * the result. Caller is responsible for dealing with timeouts.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
                mid_receive_t *receive, mid_callback_t *callback,
                mid_handle_t *handle, void *cbdata, const int flags,
                const struct cifs_credits *exist_credits)
{
        int rc;
        struct mid_q_entry *mid;
        struct cifs_credits credits = { .value = 0, .instance = 0 };
        unsigned int instance;
        int optype;

        optype = flags & CIFS_OP_MASK;

        if ((flags & CIFS_HAS_CREDITS) == 0) {
                rc = wait_for_free_request(server, flags, &instance);
                if (rc)
                        return rc;
                credits.value = 1;
                credits.instance = instance;
        } else
                instance = exist_credits->instance;

        mutex_lock(&server->srv_mutex);

        /*
         * We can't use credits obtained from the previous session to send this
         * request. Check if there were reconnects after we obtained credits and
         * return -EAGAIN in such cases to let callers handle it.
         */
        if (instance != server->reconnect_instance) {
                mutex_unlock(&server->srv_mutex);
                add_credits_and_wake_if(server, &credits, optype);
                return -EAGAIN;
        }

        mid = server->ops->setup_async_request(server, rqst);
        if (IS_ERR(mid)) {
                mutex_unlock(&server->srv_mutex);
                add_credits_and_wake_if(server, &credits, optype);
                return PTR_ERR(mid);
        }

        mid->receive = receive;
        mid->callback = callback;
        mid->callback_data = cbdata;
        mid->handle = handle;
        mid->mid_state = MID_REQUEST_SUBMITTED;

        /* put it on the pending_mid_q */
        spin_lock(&GlobalMid_Lock);
        list_add_tail(&mid->qhead, &server->pending_mid_q);
        spin_unlock(&GlobalMid_Lock);

        /*
         * Need to store the time in mid before calling I/O. For call_async,
         * I/O response may come back and free the mid entry on another thread.
         */
        cifs_save_when_sent(mid);
        cifs_in_send_inc(server);
        rc = smb_send_rqst(server, 1, rqst, flags);
        cifs_in_send_dec(server);

        if (rc < 0) {
                revert_current_mid(server, mid->credits);
                server->sequence_number -= 2;
                cifs_delete_mid(mid);
        }

        mutex_unlock(&server->srv_mutex);

        if (rc == 0)
                return 0;

        add_credits_and_wake_if(server, &credits, optype);
        return rc;
}

/*
 *
 * Send an SMB Request.  No response info (other than return code)
 * needs to be parsed.
 *
 * flags indicate the type of request buffer and how long to wait
 * and whether to log NT STATUS code (error) before mapping it to POSIX error
 *
 */
int
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
                 char *in_buf, int flags)
{
        int rc;
        struct kvec iov[1];
        struct kvec rsp_iov;
        int resp_buf_type;

        iov[0].iov_base = in_buf;
        iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
        flags |= CIFS_NO_RESP;
        rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
        cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);

        return rc;
}

static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
        int rc = 0;

        cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
                 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);

        spin_lock(&GlobalMid_Lock);
        switch (mid->mid_state) {
        case MID_RESPONSE_RECEIVED:
                spin_unlock(&GlobalMid_Lock);
                return rc;
        case MID_RETRY_NEEDED:
                rc = -EAGAIN;
                break;
        case MID_RESPONSE_MALFORMED:
                rc = -EIO;
                break;
        case MID_SHUTDOWN:
                rc = -EHOSTDOWN;
                break;
        default:
                list_del_init(&mid->qhead);
                cifs_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
                         __func__, mid->mid, mid->mid_state);
                rc = -EIO;
        }
        spin_unlock(&GlobalMid_Lock);

        DeleteMidQEntry(mid);
        return rc;
}

static inline int
send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
            struct mid_q_entry *mid)
{
        return server->ops->send_cancel ?
                                server->ops->send_cancel(server, rqst, mid) : 0;
}

int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
                   bool log_error)
{
        unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;

        dump_smb(mid->resp_buf, min_t(u32, 92, len));

        /* convert the length into a more usable form */
        if (server->sign) {
                struct kvec iov[2];
                int rc = 0;
                struct smb_rqst rqst = { .rq_iov = iov,
                                         .rq_nvec = 2 };

                iov[0].iov_base = mid->resp_buf;
                iov[0].iov_len = 4;
                iov[1].iov_base = (char *)mid->resp_buf + 4;
                iov[1].iov_len = len - 4;
                /* FIXME: add code to kill session */
                rc = cifs_verify_signature(&rqst, server,
                                           mid->sequence_number);
                if (rc)
                        cifs_dbg(VFS, "SMB signature verification returned error = %d\n",
                                 rc);
        }

        /* BB special case reconnect tid and uid here? */
        return map_smb_to_linux_error(mid->resp_buf, log_error);
}

struct mid_q_entry *
cifs_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
{
        int rc;
        struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
        struct mid_q_entry *mid;

        if (rqst->rq_iov[0].iov_len != 4 ||
            rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
                return ERR_PTR(-EIO);

        rc = allocate_mid(ses, hdr, &mid);
        if (rc)
                return ERR_PTR(rc);
        rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
        if (rc) {
                cifs_delete_mid(mid);
                return ERR_PTR(rc);
        }
        return mid;
}

static void
cifs_compound_callback(struct mid_q_entry *mid)
{
        struct TCP_Server_Info *server = mid->server;
        struct cifs_credits credits;

        credits.value = server->ops->get_credits(mid);
        credits.instance = server->reconnect_instance;

        add_credits(server, &credits, mid->optype);
}

static void
cifs_compound_last_callback(struct mid_q_entry *mid)
{
        cifs_compound_callback(mid);
        cifs_wake_up_task(mid);
}

static void
cifs_cancelled_callback(struct mid_q_entry *mid)
{
        cifs_compound_callback(mid);
        DeleteMidQEntry(mid);
}

int
compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
                   const int flags, const int num_rqst, struct smb_rqst *rqst,
                   int *resp_buf_type, struct kvec *resp_iov)
{
        int i, j, optype, rc = 0;
        struct mid_q_entry *midQ[MAX_COMPOUND];
        bool cancelled_mid[MAX_COMPOUND] = {false};
        struct cifs_credits credits[MAX_COMPOUND] = {
                { .value = 0, .instance = 0 }
        };
        unsigned int instance;
        unsigned int first_instance = 0;
        char *buf;

        optype = flags & CIFS_OP_MASK;

        for (i = 0; i < num_rqst; i++)
                resp_buf_type[i] = CIFS_NO_BUFFER;  /* no response buf yet */

        if ((ses == NULL) || (ses->server == NULL)) {
                cifs_dbg(VFS, "Null session\n");
                return -EIO;
        }

        if (ses->server->tcpStatus == CifsExiting)
                return -ENOENT;

        spin_lock(&ses->server->req_lock);
        if (ses->server->credits < num_rqst) {
                /*
                 * Return immediately if not too many requests in flight since
                 * we will likely be stuck on waiting for credits.
                 */
                if (ses->server->in_flight < num_rqst - ses->server->credits) {
                        spin_unlock(&ses->server->req_lock);
                        return -ENOTSUPP;
                }
        } else {
                /* enough credits to send the whole compounded request */
                ses->server->credits -= num_rqst;
                ses->server->in_flight += num_rqst;
                first_instance = ses->server->reconnect_instance;
        }
        spin_unlock(&ses->server->req_lock);

        if (first_instance) {
                cifs_dbg(FYI, "Acquired %d credits at once\n", num_rqst);
                for (i = 0; i < num_rqst; i++) {
                        credits[i].value = 1;
                        credits[i].instance = first_instance;
                }
                goto setup_rqsts;
        }

        /*
         * There are not enough credits to send the whole compound request but
         * there are requests in flight that may bring credits from the server.
         * This approach still leaves the possibility to be stuck waiting for
         * credits if the server doesn't grant credits to the outstanding
         * requests. This should be fixed by returning immediately and letting
         * a caller fallback to sequential commands instead of compounding.
         * Ensure we obtain 1 credit per request in the compound chain.
         */
        for (i = 0; i < num_rqst; i++) {
                rc = wait_for_free_request(ses->server, flags, &instance);

                if (rc == 0) {
                        credits[i].value = 1;
                        credits[i].instance = instance;
                        /*
                         * All parts of the compound chain must get credits from
                         * the same session, otherwise we may end up using more
                         * credits than the server granted. If there were
                         * reconnects in between, return -EAGAIN and let callers
                         * handle it.
                         */
                        if (i == 0)
                                first_instance = instance;
                        else if (first_instance != instance) {
                                i++;
                                rc = -EAGAIN;
                        }
                }

                if (rc) {
                        /*
                         * We haven't sent an SMB packet to the server yet but
                         * we already obtained credits for i requests in the
                         * compound chain - need to return those credits back
                         * for future use. Note that we need to call add_credits
                         * multiple times to match the way we obtained credits
                         * in the first place and to account for in flight
                         * requests correctly.
                         */
                        for (j = 0; j < i; j++)
                                add_credits(ses->server, &credits[j], optype);
                        return rc;
                }
        }

setup_rqsts:
        /*
         * Make sure that we sign in the same order that we send on this socket
         * and avoid races inside tcp sendmsg code that could cause corruption
         * of smb data.
         */

        mutex_lock(&ses->server->srv_mutex);

        /*
         * All the parts of the compound chain must have obtained credits from
         * the same session (see the appropriate checks above). At the same time
         * there might be reconnects after those checks but before we acquired
         * the srv_mutex. We can not use credits obtained from the previous
         * session to send this request. Check if there were reconnects after
         * we obtained credits and return -EAGAIN in such cases to let callers
         * handle it.
         */
        if (first_instance != ses->server->reconnect_instance) {
                mutex_unlock(&ses->server->srv_mutex);
                for (j = 0; j < num_rqst; j++)
                        add_credits(ses->server, &credits[j], optype);
                return -EAGAIN;
        }

        for (i = 0; i < num_rqst; i++) {
                midQ[i] = ses->server->ops->setup_request(ses, &rqst[i]);
                if (IS_ERR(midQ[i])) {
                        revert_current_mid(ses->server, i);
                        for (j = 0; j < i; j++)
                                cifs_delete_mid(midQ[j]);
                        mutex_unlock(&ses->server->srv_mutex);

                        /* Update # of requests on wire to server */
                        for (j = 0; j < num_rqst; j++)
                                add_credits(ses->server, &credits[j], optype);
                        return PTR_ERR(midQ[i]);
                }

                midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
                midQ[i]->optype = optype;
                /*
                 * Invoke callback for every part of the compound chain
                 * to calculate credits properly. Wake up this thread only when
                 * the last element is received.
                 */
                if (i < num_rqst - 1)
                        midQ[i]->callback = cifs_compound_callback;
                else
                        midQ[i]->callback = cifs_compound_last_callback;
        }
        cifs_in_send_inc(ses->server);
        rc = smb_send_rqst(ses->server, num_rqst, rqst, flags);
        cifs_in_send_dec(ses->server);

        for (i = 0; i < num_rqst; i++)
                cifs_save_when_sent(midQ[i]);

        if (rc < 0) {
                revert_current_mid(ses->server, num_rqst);
                ses->server->sequence_number -= 2;
        }

        mutex_unlock(&ses->server->srv_mutex);

        if (rc < 0) {
                /* Sending failed for some reason - return credits back */
                for (i = 0; i < num_rqst; i++)
                        add_credits(ses->server, &credits[i], optype);
                goto out;
        }

        /*
         * At this point the request is passed to the network stack - we assume
         * that any credits taken from the server structure on the client have
         * been spent and we can't return them back. Once we receive responses
         * we will collect credits granted by the server in the mid callbacks
         * and add those credits to the server structure.
         */

        /*
         * Compounding is never used during session establish.
         */
        if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP))
                smb311_update_preauth_hash(ses, rqst[0].rq_iov,
                                           rqst[0].rq_nvec);

        if ((flags & CIFS_TIMEOUT_MASK) == CIFS_ASYNC_OP)
                goto out;

        for (i = 0; i < num_rqst; i++) {
                rc = wait_for_response(ses->server, midQ[i]);
                if (rc != 0)
                        break;
        }
        if (rc != 0) {
                for (; i < num_rqst; i++) {
                        cifs_dbg(VFS, "Cancelling wait for mid %llu cmd: %d\n",
                                 midQ[i]->mid, le16_to_cpu(midQ[i]->command));
                        send_cancel(ses->server, &rqst[i], midQ[i]);
                        spin_lock(&GlobalMid_Lock);
                        if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
                                midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
                                midQ[i]->callback = cifs_cancelled_callback;
                                cancelled_mid[i] = true;
                                credits[i].value = 0;
                        }
                        spin_unlock(&GlobalMid_Lock);
                }
        }

        for (i = 0; i < num_rqst; i++) {
                if (rc < 0)
                        goto out;

                rc = cifs_sync_mid_result(midQ[i], ses->server);
                if (rc != 0) {
                        /* mark this mid as cancelled to not free it below */
                        cancelled_mid[i] = true;
                        goto out;
                }

                if (!midQ[i]->resp_buf ||
                    midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
                        rc = -EIO;
                        cifs_dbg(FYI, "Bad MID state?\n");
                        goto out;
                }

                buf = (char *)midQ[i]->resp_buf;
                resp_iov[i].iov_base = buf;
                resp_iov[i].iov_len = midQ[i]->resp_buf_size +
                        ses->server->vals->header_preamble_size;

                if (midQ[i]->large_buf)
                        resp_buf_type[i] = CIFS_LARGE_BUFFER;
                else
                        resp_buf_type[i] = CIFS_SMALL_BUFFER;

                rc = ses->server->ops->check_receive(midQ[i], ses->server,
                                                     flags & CIFS_LOG_ERROR);

                /* mark it so buf will not be freed by cifs_delete_mid */
                if ((flags & CIFS_NO_RESP) == 0)
                        midQ[i]->resp_buf = NULL;

        }

        /*
         * Compounding is never used during session establish.
         */
        if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
                struct kvec iov = {
                        .iov_base = resp_iov[0].iov_base,
                        .iov_len = resp_iov[0].iov_len
                };
                smb311_update_preauth_hash(ses, &iov, 1);
        }

out:
        /*
         * This will dequeue all mids. After this it is important that the
         * demultiplex_thread will not process any of these mids any further.
         * This is prevented above by using a noop callback that will not
         * wake this thread except for the very last PDU.
         */
        for (i = 0; i < num_rqst; i++) {
                if (!cancelled_mid[i])
                        cifs_delete_mid(midQ[i]);
        }

        return rc;
}

int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
               struct smb_rqst *rqst, int *resp_buf_type, const int flags,
               struct kvec *resp_iov)
{
        return compound_send_recv(xid, ses, flags, 1, rqst, resp_buf_type,
                                  resp_iov);
}

int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
             struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
             const int flags, struct kvec *resp_iov)
{
        struct smb_rqst rqst;
        struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
        int rc;

        if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
                new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
                                        GFP_KERNEL);
                if (!new_iov) {
                        /* otherwise cifs_send_recv below sets resp_buf_type */
                        *resp_buf_type = CIFS_NO_BUFFER;
                        return -ENOMEM;
                }
        } else
                new_iov = s_iov;

        /* 1st iov is a RFC1001 length followed by the rest of the packet */
        memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));

        new_iov[0].iov_base = new_iov[1].iov_base;
        new_iov[0].iov_len = 4;
        new_iov[1].iov_base += 4;
        new_iov[1].iov_len -= 4;

        memset(&rqst, 0, sizeof(struct smb_rqst));
        rqst.rq_iov = new_iov;
        rqst.rq_nvec = n_vec + 1;

        rc = cifs_send_recv(xid, ses, &rqst, resp_buf_type, flags, resp_iov);
        if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
                kfree(new_iov);
        return rc;
}
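
/*
 * Illustrative sketch (hypothetical helper, not built): the kvec split
 * SendReceive2() performs above. Callers pass a buffer whose first four
 * bytes are the RFC1001 length field; it is carved off into its own kvec
 * so the request matches the shape compound_send_recv() expects.
 */
#if 0
static void example_split_rfc1001(struct kvec out[2], char *buf, size_t len)
{
        out[0].iov_base = buf;          /* 4-byte RFC1001 length field */
        out[0].iov_len = 4;
        out[1].iov_base = buf + 4;      /* remainder of the packet */
        out[1].iov_len = len - 4;
}
#endif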

int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
            struct smb_hdr *in_buf, struct smb_hdr *out_buf,
            int *pbytes_returned, const int flags)
{
        int rc = 0;
        struct mid_q_entry *midQ;
        unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
        struct kvec iov = { .iov_base = in_buf, .iov_len = len };
        struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
        struct cifs_credits credits = { .value = 1, .instance = 0 };

        if (ses == NULL) {
                cifs_dbg(VFS, "Null smb session\n");
                return -EIO;
        }
        if (ses->server == NULL) {
                cifs_dbg(VFS, "Null tcp session\n");
                return -EIO;
        }

        if (ses->server->tcpStatus == CifsExiting)
                return -ENOENT;

        /* Ensure that we do not send more than 50 overlapping requests
           to the same server. We may make this configurable later or
           use ses->maxReq */

        if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
                cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
                         len);
                return -EIO;
        }

        rc = wait_for_free_request(ses->server, flags, &credits.instance);
        if (rc)
                return rc;

        /* make sure that we sign in the same order that we send on this socket
           and avoid races inside tcp sendmsg code that could cause corruption
           of smb data */

        mutex_lock(&ses->server->srv_mutex);

        rc = allocate_mid(ses, in_buf, &midQ);
        if (rc) {
                mutex_unlock(&ses->server->srv_mutex);
                /* Update # of requests on wire to server */
                add_credits(ses->server, &credits, 0);
                return rc;
        }

        rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
        if (rc) {
                mutex_unlock(&ses->server->srv_mutex);
                goto out;
        }

        midQ->mid_state = MID_REQUEST_SUBMITTED;

        cifs_in_send_inc(ses->server);
        rc = smb_send(ses->server, in_buf, len);
        cifs_in_send_dec(ses->server);
        cifs_save_when_sent(midQ);

        if (rc < 0)
                ses->server->sequence_number -= 2;

        mutex_unlock(&ses->server->srv_mutex);

        if (rc < 0)
                goto out;

        if ((flags & CIFS_TIMEOUT_MASK) == CIFS_ASYNC_OP)
                goto out;

        rc = wait_for_response(ses->server, midQ);
        if (rc != 0) {
                send_cancel(ses->server, &rqst, midQ);
                spin_lock(&GlobalMid_Lock);
                if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
                        /* no longer considered to be "in-flight" */
                        midQ->callback = DeleteMidQEntry;
                        spin_unlock(&GlobalMid_Lock);
                        add_credits(ses->server, &credits, 0);
                        return rc;
                }
                spin_unlock(&GlobalMid_Lock);
        }

        rc = cifs_sync_mid_result(midQ, ses->server);
        if (rc != 0) {
                add_credits(ses->server, &credits, 0);
                return rc;
        }

        if (!midQ->resp_buf || !out_buf ||
            midQ->mid_state != MID_RESPONSE_RECEIVED) {
                rc = -EIO;
                cifs_dbg(VFS, "Bad MID state?\n");
                goto out;
        }

        *pbytes_returned = get_rfc1002_length(midQ->resp_buf);
        memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
        rc = cifs_check_receive(midQ, ses->server, 0);
out:
        cifs_delete_mid(midQ);
        add_credits(ses->server, &credits, 0);

        return rc;
}

/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
   blocking lock to return. */

static int
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
                        struct smb_hdr *in_buf,
                        struct smb_hdr *out_buf)
{
        int bytes_returned;
        struct cifs_ses *ses = tcon->ses;
        LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;

        /* We just modify the current in_buf to change
           the type of lock from LOCKING_ANDX_SHARED_LOCK
           or LOCKING_ANDX_EXCLUSIVE_LOCK to
           LOCKING_ANDX_CANCEL_LOCK. */

        pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
        pSMB->Timeout = 0;
        pSMB->hdr.Mid = get_next_mid(ses->server);

        return SendReceive(xid, ses, in_buf, out_buf,
                        &bytes_returned, 0);
}

int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
            struct smb_hdr *in_buf, struct smb_hdr *out_buf,
            int *pbytes_returned)
{
        int rc = 0;
        int rstart = 0;
        struct mid_q_entry *midQ;
        struct cifs_ses *ses;
        unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
        struct kvec iov = { .iov_base = in_buf, .iov_len = len };
        struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
        unsigned int instance;

        if (tcon == NULL || tcon->ses == NULL) {
                cifs_dbg(VFS, "Null smb session\n");
                return -EIO;
        }
        ses = tcon->ses;

        if (ses->server == NULL) {
                cifs_dbg(VFS, "Null tcp session\n");
                return -EIO;
        }

        if (ses->server->tcpStatus == CifsExiting)
                return -ENOENT;

        /* Ensure that we do not send more than 50 overlapping requests
           to the same server. We may make this configurable later or
           use ses->maxReq */

        if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
                cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
                         len);
                return -EIO;
        }

        rc = wait_for_free_request(ses->server, CIFS_BLOCKING_OP, &instance);
        if (rc)
                return rc;

        /* make sure that we sign in the same order that we send on this socket
           and avoid races inside tcp sendmsg code that could cause corruption
           of smb data */

        mutex_lock(&ses->server->srv_mutex);

        rc = allocate_mid(ses, in_buf, &midQ);
        if (rc) {
                mutex_unlock(&ses->server->srv_mutex);
                return rc;
        }

        rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
        if (rc) {
                cifs_delete_mid(midQ);
                mutex_unlock(&ses->server->srv_mutex);
                return rc;
        }

        midQ->mid_state = MID_REQUEST_SUBMITTED;
        cifs_in_send_inc(ses->server);
        rc = smb_send(ses->server, in_buf, len);
        cifs_in_send_dec(ses->server);
        cifs_save_when_sent(midQ);

        if (rc < 0)
                ses->server->sequence_number -= 2;

        mutex_unlock(&ses->server->srv_mutex);

        if (rc < 0) {
                cifs_delete_mid(midQ);
                return rc;
        }

        /* Wait for a reply - allow signals to interrupt. */
        rc = wait_event_interruptible(ses->server->response_q,
                (!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
                ((ses->server->tcpStatus != CifsGood) &&
                 (ses->server->tcpStatus != CifsNew)));

        /* Were we interrupted by a signal ? */
        if ((rc == -ERESTARTSYS) &&
                (midQ->mid_state == MID_REQUEST_SUBMITTED) &&
                ((ses->server->tcpStatus == CifsGood) ||
                 (ses->server->tcpStatus == CifsNew))) {

                if (in_buf->Command == SMB_COM_TRANSACTION2) {
                        /* POSIX lock. We send a NT_CANCEL SMB to cause the
                           blocking lock to return. */
                        rc = send_cancel(ses->server, &rqst, midQ);
                        if (rc) {
                                cifs_delete_mid(midQ);
                                return rc;
                        }
                } else {
                        /* Windows lock. We send a LOCKINGX_CANCEL_LOCK
                           to cause the blocking lock to return. */

                        rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

                        /* If we get -ENOLCK back the lock may have
                           already been removed. Don't exit in this case. */
                        if (rc && rc != -ENOLCK) {
                                cifs_delete_mid(midQ);
                                return rc;
                        }
                }

                rc = wait_for_response(ses->server, midQ);
                if (rc) {
                        send_cancel(ses->server, &rqst, midQ);
                        spin_lock(&GlobalMid_Lock);
                        if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
                                /* no longer considered to be "in-flight" */
                                midQ->callback = DeleteMidQEntry;
                                spin_unlock(&GlobalMid_Lock);
                                return rc;
                        }
                        spin_unlock(&GlobalMid_Lock);
                }

                /* We got the response - restart system call. */
                rstart = 1;
        }

        rc = cifs_sync_mid_result(midQ, ses->server);
        if (rc != 0)
                return rc;

        /* rcvd frame is ok */
        if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
                rc = -EIO;
                cifs_dbg(VFS, "Bad MID state?\n");
                goto out;
        }

        *pbytes_returned = get_rfc1002_length(midQ->resp_buf);
        memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
        rc = cifs_check_receive(midQ, ses->server, 0);
out:
        cifs_delete_mid(midQ);
        if (rstart && rc == -EACCES)
                return -ERESTARTSYS;
        return rc;
}