/*
 * fs/cifs/transport.c
 *
 * Copyright (C) International Business Machines Corp., 2002,2008
 * Author(s): Steve French (sfrench@us.ibm.com)
 * Jeremy Allison (jra@samba.org) 2006.
 *
 * This library is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/wait.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/tcp.h>
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>
#include <asm/processor.h>
#include <linux/mempool.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smb2proto.h"
#include "smbdirect.h"

/* Max number of iovectors we can use off the stack when sending requests. */
#define CIFS_MAX_IOV_SIZE 8

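/*
 * Default mid callback: wake up the task that queued the request
 * (AllocMidQEntry stores it in callback_data) so it can process the
 * response.
 */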
void
cifs_wake_up_task(struct mid_q_entry *mid)
{
	wake_up_process(mid->callback_data);
}

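/*
 * Allocate a mid queue entry for @smb_buffer from the mid mempool and
 * initialize it as a synchronous request; the caller is responsible for
 * placing it on the server's pending_mid_q.
 */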
struct mid_q_entry *
AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
	struct mid_q_entry *temp;

	if (server == NULL) {
		cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
		return NULL;
	}

	temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
	memset(temp, 0, sizeof(struct mid_q_entry));
	kref_init(&temp->refcount);
	temp->mid = get_mid(smb_buffer);
	temp->pid = current->pid;
	temp->command = cpu_to_le16(smb_buffer->Command);
	cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
	/* do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
	/* when mid allocated can be before when sent */
	temp->when_alloc = jiffies;
	temp->server = server;

	/*
	 * The default is for the mid to be synchronous, so the
	 * default callback just wakes up the current task.
	 */
	temp->callback = cifs_wake_up_task;
	temp->callback_data = current;

	atomic_inc(&midCount);
	temp->mid_state = MID_REQUEST_ALLOCATED;
	return temp;
}

static void _cifs_mid_q_entry_release(struct kref *refcount)
{
	struct mid_q_entry *mid = container_of(refcount, struct mid_q_entry,
					       refcount);

	mempool_free(mid, cifs_mid_poolp);
}

void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
{
	spin_lock(&GlobalMid_Lock);
	kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
	spin_unlock(&GlobalMid_Lock);
}

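/*
 * Mark a mid free, release its response buffer and drop the reference
 * taken at allocation.  With CONFIG_CIFS_STATS2, responses that took
 * longer than a second (other than blocking lock commands) are counted
 * and traced as slow.
 */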
void
DeleteMidQEntry(struct mid_q_entry *midEntry)
{
#ifdef CONFIG_CIFS_STATS2
	__le16 command = midEntry->server->vals->lock_cmd;
	unsigned long now;
#endif
	midEntry->mid_state = MID_FREE;
	atomic_dec(&midCount);
	if (midEntry->large_buf)
		cifs_buf_release(midEntry->resp_buf);
	else
		cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
	now = jiffies;
	/* commands taking longer than one second are indications that
	   something is wrong, unless it is quite a slow link or server */
	if (time_after(now, midEntry->when_alloc + HZ) &&
	    (midEntry->command != command)) {
		/* smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command */
		if ((le16_to_cpu(midEntry->command) < NUMBER_OF_SMB2_COMMANDS) &&
		    (le16_to_cpu(midEntry->command) >= 0))
			cifs_stats_inc(&midEntry->server->smb2slowcmd[le16_to_cpu(midEntry->command)]);

		trace_smb3_slow_rsp(le16_to_cpu(midEntry->command),
				    midEntry->mid, midEntry->pid,
				    midEntry->when_sent, midEntry->when_received);
		if (cifsFYI & CIFS_TIMER) {
			pr_debug(" CIFS slow rsp: cmd %d mid %llu",
				 midEntry->command, midEntry->mid);
			pr_info(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
				now - midEntry->when_alloc,
				now - midEntry->when_sent,
				now - midEntry->when_received);
		}
	}
#endif
	cifs_mid_q_entry_release(midEntry);
}

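/*
 * Unlink a mid from the server's pending_mid_q under GlobalMid_Lock,
 * flag it MID_DELETED, and destroy it.
 */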
void
cifs_delete_mid(struct mid_q_entry *mid)
{
	spin_lock(&GlobalMid_Lock);
	list_del_init(&mid->qhead);
	mid->mid_flags |= MID_DELETED;
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
}

/*
 * smb_send_kvec - send an array of kvecs to the server
 * @server:	Server to send the data to
 * @smb_msg:	Message to send
 * @sent:	amount of data sent on socket is stored here
 *
 * Our basic "send data to server" function. Should be called with srv_mutex
 * held. The caller is responsible for handling the results.
 */
static int
smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
	      size_t *sent)
{
	int rc = 0;
	int retries = 0;
	struct socket *ssocket = server->ssocket;

	*sent = 0;

	smb_msg->msg_name = (struct sockaddr *) &server->dstaddr;
	smb_msg->msg_namelen = sizeof(struct sockaddr);
	smb_msg->msg_control = NULL;
	smb_msg->msg_controllen = 0;
	if (server->noblocksnd)
		smb_msg->msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
	else
		smb_msg->msg_flags = MSG_NOSIGNAL;

	while (msg_data_left(smb_msg)) {
		/*
		 * If blocking send, we try 3 times, since each can block
		 * for 5 seconds. For nonblocking we have to try more
		 * but wait increasing amounts of time allowing time for
		 * socket to clear. The overall time we wait in either
		 * case to send on the socket is about 15 seconds.
		 * Similarly we wait up to 15 seconds in SendReceive[2]
		 * for the server to send a response back for most types
		 * of requests (except SMB Write past end of file which
		 * can be slow, and blocking lock operations). NFS waits
		 * slightly longer than CIFS, but this can make it take
		 * longer for nonresponsive servers to be detected and
		 * 15 seconds is more than enough time for modern
		 * networks to send a packet. In most cases if we fail
		 * to send after the retries we will kill the socket and
		 * reconnect which may clear the network problem.
		 */
		rc = sock_sendmsg(ssocket, smb_msg);
		if (rc == -EAGAIN) {
			retries++;
			if (retries >= 14 ||
			    (!server->noblocksnd && (retries > 2))) {
				cifs_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
					 ssocket);
				return -EAGAIN;
			}
			msleep(1 << retries);
			continue;
		}

		if (rc < 0)
			return rc;

		if (rc == 0) {
			/* should never happen, letting socket clear before
			   retrying is our only obvious option here */
			cifs_dbg(VFS, "tcp sent no data\n");
			msleep(500);
			continue;
		}

		/* send was at least partially successful */
		*sent += rc;
		retries = 0; /* in case we get ENOSPC on the next send */
	}
	return 0;
}

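/*
 * Compute the on-the-wire length of @rqst: the sum of its iov array plus
 * any trailing page array.  For SMB2+ (header_preamble_size == 0) a
 * leading 4-byte iov is skipped, since the RFC1001 length field does not
 * count itself.
 */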
unsigned long
smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	unsigned int i;
	struct kvec *iov;
	int nvec;
	unsigned long buflen = 0;

	if (server->vals->header_preamble_size == 0 &&
	    rqst->rq_nvec >= 2 && rqst->rq_iov[0].iov_len == 4) {
		iov = &rqst->rq_iov[1];
		nvec = rqst->rq_nvec - 1;
	} else {
		iov = rqst->rq_iov;
		nvec = rqst->rq_nvec;
	}

	/* total up iov array first */
	for (i = 0; i < nvec; i++)
		buflen += iov[i].iov_len;

	/*
	 * Add in the page array if there is one. The caller needs to make
	 * sure rq_offset and rq_tailsz are set correctly. If a buffer of
	 * multiple pages ends at page boundary, rq_tailsz needs to be set to
	 * PAGE_SIZE.
	 */
	if (rqst->rq_npages) {
		if (rqst->rq_npages == 1)
			buflen += rqst->rq_tailsz;
		else {
			/*
			 * If there is more than one page, calculate the
			 * buffer length based on rq_offset and rq_tailsz
			 */
			buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
					rqst->rq_offset;
			buflen += rqst->rq_tailsz;
		}
	}

	return buflen;
}

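/*
 * Send one or more compounded requests over the socket, or via SMB
 * Direct when RDMA is enabled.  The socket is corked so the RFC1001
 * length marker, iovs and page data go out as a single stream; a partial
 * send kills the session (CifsNeedReconnect) so the server cannot
 * misinterpret the next request as the remainder of this one.
 */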
static int
__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
		struct smb_rqst *rqst)
{
	int rc = 0;
	struct kvec *iov;
	int n_vec;
	unsigned int send_length = 0;
	unsigned int i, j;
	size_t total_len = 0, sent, size;
	struct socket *ssocket = server->ssocket;
	struct msghdr smb_msg;
	int val = 1;
	__be32 rfc1002_marker;

	if (cifs_rdma_enabled(server)) {
		/* return -EAGAIN when connecting or reconnecting */
		rc = -EAGAIN;
		if (server->smbd_conn)
			rc = smbd_send(server, num_rqst, rqst);
		goto smbd_done;
	}
	if (ssocket == NULL)
		return -ENOTSOCK;

	/* cork the socket */
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
			  (char *)&val, sizeof(val));

	for (j = 0; j < num_rqst; j++)
		send_length += smb_rqst_len(server, &rqst[j]);
	rfc1002_marker = cpu_to_be32(send_length);

	/* Generate a rfc1002 marker for SMB2+ */
	if (server->vals->header_preamble_size == 0) {
		struct kvec hiov = {
			.iov_base = &rfc1002_marker,
			.iov_len = 4
		};
		iov_iter_kvec(&smb_msg.msg_iter, WRITE | ITER_KVEC, &hiov,
			      1, 4);
		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto uncork;

		total_len += sent;
		send_length += 4;
	}

	cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);

	for (j = 0; j < num_rqst; j++) {
		iov = rqst[j].rq_iov;
		n_vec = rqst[j].rq_nvec;

		size = 0;
		for (i = 0; i < n_vec; i++) {
			dump_smb(iov[i].iov_base, iov[i].iov_len);
			size += iov[i].iov_len;
		}

		iov_iter_kvec(&smb_msg.msg_iter, WRITE | ITER_KVEC,
			      iov, n_vec, size);

		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto uncork;

		total_len += sent;

		/* now walk the page array and send each page in it */
		for (i = 0; i < rqst[j].rq_npages; i++) {
			struct bio_vec bvec;

			bvec.bv_page = rqst[j].rq_pages[i];
			rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
					     &bvec.bv_offset);

			iov_iter_bvec(&smb_msg.msg_iter, WRITE | ITER_BVEC,
				      &bvec, 1, bvec.bv_len);
			rc = smb_send_kvec(server, &smb_msg, &sent);
			if (rc < 0)
				break;

			total_len += sent;
		}
	}

uncork:
	/* uncork it */
	val = 0;
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
			  (char *)&val, sizeof(val));

	if ((total_len > 0) && (total_len != send_length)) {
		cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
			 send_length, total_len);
		/*
		 * If we have only sent part of an SMB then the next SMB could
		 * be taken as the remainder of this one. We need to kill the
		 * socket so the server throws away the partial SMB
		 */
		server->tcpStatus = CifsNeedReconnect;
		trace_smb3_partial_send_reconnect(server->CurrentMid,
						  server->hostname);
	}
smbd_done:
	if (rc < 0 && rc != -EINTR)
		cifs_dbg(VFS, "Error %d sending data on socket to server\n",
			 rc);
	else if (rc > 0)
		rc = 0;

	return rc;
}

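/*
 * Wrapper around __smb_send_rqst().  If CIFS_TRANSFORM_REQ is set, the
 * chain is first encrypted via the server's init_transform_rq hook with
 * an smb2_transform_hdr prepended as an extra request; otherwise the
 * requests are sent as-is.
 */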
static int
smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
	      struct smb_rqst *rqst, int flags)
{
	struct kvec iov;
	struct smb2_transform_hdr tr_hdr;
	struct smb_rqst cur_rqst[MAX_COMPOUND];
	int rc;

	if (!(flags & CIFS_TRANSFORM_REQ))
		return __smb_send_rqst(server, num_rqst, rqst);

	if (num_rqst > MAX_COMPOUND - 1)
		return -ENOMEM;

	memset(&cur_rqst[0], 0, sizeof(cur_rqst));
	memset(&iov, 0, sizeof(iov));
	memset(&tr_hdr, 0, sizeof(tr_hdr));

	iov.iov_base = &tr_hdr;
	iov.iov_len = sizeof(tr_hdr);
	cur_rqst[0].rq_iov = &iov;
	cur_rqst[0].rq_nvec = 1;

	if (!server->ops->init_transform_rq) {
		cifs_dbg(VFS, "Encryption requested but transform callback is missing\n");
		return -EIO;
	}

	rc = server->ops->init_transform_rq(server, num_rqst + 1,
					    &cur_rqst[0], rqst);
	if (rc)
		return rc;

	rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
	smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
	return rc;
}

int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
	 unsigned int smb_buf_length)
{
	struct kvec iov[2];
	struct smb_rqst rqst = { .rq_iov = iov,
				 .rq_nvec = 2 };

	iov[0].iov_base = smb_buffer;
	iov[0].iov_len = 4;
	iov[1].iov_base = (char *)smb_buffer + 4;
	iov[1].iov_len = smb_buf_length;

	return __smb_send_rqst(server, 1, &rqst);
}

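/*
 * Wait until a send credit is available and consume it, bumping the
 * count of requests in flight.  Async operations (e.g. oplock breaks)
 * are never held up, and blocking lock commands do not consume a credit
 * since they are allowed to block on the server.
 */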
static int
wait_for_free_credits(struct TCP_Server_Info *server, const int timeout,
		      int *credits)
{
	int rc;

	spin_lock(&server->req_lock);
	if (timeout == CIFS_ASYNC_OP) {
		/* oplock breaks must not be held up */
		server->in_flight++;
		*credits -= 1;
		spin_unlock(&server->req_lock);
		return 0;
	}

	while (1) {
		if (*credits <= 0) {
			spin_unlock(&server->req_lock);
			cifs_num_waiters_inc(server);
			rc = wait_event_killable(server->request_q,
						 has_credits(server, credits));
			cifs_num_waiters_dec(server);
			if (rc)
				return rc;
			spin_lock(&server->req_lock);
		} else {
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&server->req_lock);
				return -ENOENT;
			}

			/*
			 * Can not count locking commands against total
			 * as they are allowed to block on server.
			 */

			/* update # of requests on the wire to server */
			if (timeout != CIFS_BLOCKING_OP) {
				*credits -= 1;
				server->in_flight++;
			}
			spin_unlock(&server->req_lock);
			break;
		}
	}
	return 0;
}

static int
wait_for_free_request(struct TCP_Server_Info *server, const int timeout,
		      const int optype)
{
	int *val;

	val = server->ops->get_credits_field(server, optype);
	/* Since an echo is already inflight, no need to wait to send another */
	if (*val <= 0 && optype == CIFS_ECHO_OP)
		return -EAGAIN;
	return wait_for_free_credits(server, timeout, val);
}

int
cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
		      unsigned int *num, unsigned int *credits)
{
	*num = size;
	*credits = 0;
	return 0;
}

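/*
 * Allocate a mid for @in_buf and queue it on the server's pending_mid_q.
 * While a session is being set up or torn down only the matching setup
 * and logoff commands are allowed through; everything else gets -EAGAIN.
 */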
static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
			struct mid_q_entry **ppmidQ)
{
	if (ses->server->tcpStatus == CifsExiting) {
		return -ENOENT;
	}

	if (ses->server->tcpStatus == CifsNeedReconnect) {
		cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
		return -EAGAIN;
	}

	if (ses->status == CifsNew) {
		if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
		    (in_buf->Command != SMB_COM_NEGOTIATE))
			return -EAGAIN;
		/* else ok - we are setting up session */
	}

	if (ses->status == CifsExiting) {
		/* check if SMB session is bad because we are shutting it down */
		if (in_buf->Command != SMB_COM_LOGOFF_ANDX)
			return -EAGAIN;
		/* else ok - we are shutting down session */
	}

	*ppmidQ = AllocMidQEntry(in_buf, ses->server);
	if (*ppmidQ == NULL)
		return -ENOMEM;
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);
	return 0;
}

static int
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
	int error;

	error = wait_event_freezekillable_unsafe(server->response_q,
				    midQ->mid_state != MID_REQUEST_SUBMITTED);
	if (error < 0)
		return -ERESTARTSYS;

	return 0;
}

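/*
 * Build and sign a mid for an asynchronous request.  The first iov must
 * be the 4-byte RFC1001 length, immediately followed in memory by the
 * SMB header in the second iov; anything else is rejected with -EIO.
 */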
struct mid_q_entry *
cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	/* enable signing if server requires it */
	if (server->sign)
		hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

	mid = AllocMidQEntry(hdr, server);
	if (mid == NULL)
		return ERR_PTR(-ENOMEM);

	rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
	if (rc) {
		DeleteMidQEntry(mid);
		return ERR_PTR(rc);
	}

	return mid;
}

/*
 * Send an SMB request and set the callback function in the mid to handle
 * the result. Caller is responsible for dealing with timeouts.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
		mid_receive_t *receive, mid_callback_t *callback,
		mid_handle_t *handle, void *cbdata, const int flags)
{
	int rc, timeout, optype;
	struct mid_q_entry *mid;
	unsigned int credits = 0;

	timeout = flags & CIFS_TIMEOUT_MASK;
	optype = flags & CIFS_OP_MASK;

	if ((flags & CIFS_HAS_CREDITS) == 0) {
		rc = wait_for_free_request(server, timeout, optype);
		if (rc)
			return rc;
		credits = 1;
	}

	mutex_lock(&server->srv_mutex);
	mid = server->ops->setup_async_request(server, rqst);
	if (IS_ERR(mid)) {
		mutex_unlock(&server->srv_mutex);
		add_credits_and_wake_if(server, credits, optype);
		return PTR_ERR(mid);
	}

	mid->receive = receive;
	mid->callback = callback;
	mid->callback_data = cbdata;
	mid->handle = handle;
	mid->mid_state = MID_REQUEST_SUBMITTED;

	/* put it on the pending_mid_q */
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&mid->qhead, &server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);

	/*
	 * Need to store the time in mid before calling I/O. For call_async,
	 * I/O response may come back and free the mid entry on another thread.
	 */
	cifs_save_when_sent(mid);
	cifs_in_send_inc(server);
	rc = smb_send_rqst(server, 1, rqst, flags);
	cifs_in_send_dec(server);

	if (rc < 0) {
		revert_current_mid(server, mid->credits);
		server->sequence_number -= 2;
		cifs_delete_mid(mid);
	}

	mutex_unlock(&server->srv_mutex);

	if (rc == 0)
		return 0;

	add_credits_and_wake_if(server, credits, optype);
	return rc;
}

/*
 * Send an SMB Request.  No response info (other than return code)
 * needs to be parsed.
 *
 * flags indicate the type of request buffer and how long to wait
 * and whether to log NT STATUS code (error) before mapping it to POSIX error
 */
int
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
		 char *in_buf, int flags)
{
	int rc;
	struct kvec iov[1];
	struct kvec rsp_iov;
	int resp_buf_type;

	iov[0].iov_base = in_buf;
	iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
	flags |= CIFS_NO_RESP;
	rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
	cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);

	return rc;
}

static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
	int rc = 0;

	cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
		 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);

	spin_lock(&GlobalMid_Lock);
	switch (mid->mid_state) {
	case MID_RESPONSE_RECEIVED:
		spin_unlock(&GlobalMid_Lock);
		return rc;
	case MID_RETRY_NEEDED:
		rc = -EAGAIN;
		break;
	case MID_RESPONSE_MALFORMED:
		rc = -EIO;
		break;
	case MID_SHUTDOWN:
		rc = -EHOSTDOWN;
		break;
	default:
		list_del_init(&mid->qhead);
		cifs_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
			 __func__, mid->mid, mid->mid_state);
		rc = -EIO;
	}
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
	return rc;
}

static inline int
send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
	    struct mid_q_entry *mid)
{
	return server->ops->send_cancel ?
				server->ops->send_cancel(server, rqst, mid) : 0;
}

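/*
 * Sanity-check a received response: verify its signature when signing
 * is active, then map the status code in the header to a POSIX error.
 */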
int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
		   bool log_error)
{
	unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;

	dump_smb(mid->resp_buf, min_t(u32, 92, len));

	/* convert the length into a more usable form */
	if (server->sign) {
		struct kvec iov[2];
		int rc = 0;
		struct smb_rqst rqst = { .rq_iov = iov,
					 .rq_nvec = 2 };

		iov[0].iov_base = mid->resp_buf;
		iov[0].iov_len = 4;
		iov[1].iov_base = (char *)mid->resp_buf + 4;
		iov[1].iov_len = len - 4;
		/* FIXME: add code to kill session */
		rc = cifs_verify_signature(&rqst, server,
					   mid->sequence_number);
		if (rc)
			cifs_dbg(VFS, "SMB signature verification returned error = %d\n",
				 rc);
	}

	/* BB special case reconnect tid and uid here? */
	return map_smb_to_linux_error(mid->resp_buf, log_error);
}

struct mid_q_entry *
cifs_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	rc = allocate_mid(ses, hdr, &mid);
	if (rc)
		return ERR_PTR(rc);
	rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
	if (rc) {
		cifs_delete_mid(mid);
		return ERR_PTR(rc);
	}
	return mid;
}

static void
cifs_noop_callback(struct mid_q_entry *mid)
{
}

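/*
 * Send a chain of one or more compounded requests and collect their
 * responses.  One send credit is obtained per request up front, and mids
 * for all but the last request get a no-op callback so that only the
 * final response wakes this thread.
 */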
int
compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
		   const int flags, const int num_rqst, struct smb_rqst *rqst,
		   int *resp_buf_type, struct kvec *resp_iov)
{
	int i, j, rc = 0;
	int timeout, optype;
	struct mid_q_entry *midQ[MAX_COMPOUND];
	bool cancelled_mid[MAX_COMPOUND] = {false};
	unsigned int credits[MAX_COMPOUND] = {0};
	char *buf;

	timeout = flags & CIFS_TIMEOUT_MASK;
	optype = flags & CIFS_OP_MASK;

	for (i = 0; i < num_rqst; i++)
		resp_buf_type[i] = CIFS_NO_BUFFER;  /* no response buf yet */

	if ((ses == NULL) || (ses->server == NULL)) {
		cifs_dbg(VFS, "Null session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/*
	 * Ensure we obtain 1 credit per request in the compound chain.
	 * It can be optimized further by waiting for all the credits
	 * at once but this can wait long enough if we don't have enough
	 * credits due to some heavy operations in progress or the server
	 * not granting us much, so a fallback to the current approach is
	 * needed anyway.
	 */
	for (i = 0; i < num_rqst; i++) {
		rc = wait_for_free_request(ses->server, timeout, optype);
		if (rc) {
			/*
			 * We haven't sent an SMB packet to the server yet but
			 * we already obtained credits for i requests in the
			 * compound chain - need to return those credits back
			 * for future use. Note that we need to call add_credits
			 * multiple times to match the way we obtained credits
			 * in the first place and to account for in flight
			 * requests correctly.
			 */
			for (j = 0; j < i; j++)
				add_credits(ses->server, 1, optype);
			return rc;
		}
		credits[i] = 1;
	}

	/*
	 * Make sure that we sign in the same order that we send on this socket
	 * and avoid races inside tcp sendmsg code that could cause corruption
	 * of smb data.
	 */

	mutex_lock(&ses->server->srv_mutex);

	for (i = 0; i < num_rqst; i++) {
		midQ[i] = ses->server->ops->setup_request(ses, &rqst[i]);
		if (IS_ERR(midQ[i])) {
			revert_current_mid(ses->server, i);
			for (j = 0; j < i; j++)
				cifs_delete_mid(midQ[j]);
			mutex_unlock(&ses->server->srv_mutex);

			/* Update # of requests on wire to server */
			for (j = 0; j < num_rqst; j++)
				add_credits(ses->server, credits[j], optype);
			return PTR_ERR(midQ[i]);
		}

		midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
		/*
		 * We don't invoke the callback for compounded requests unless
		 * it is the last request.
		 */
		if (i < num_rqst - 1)
			midQ[i]->callback = cifs_noop_callback;
	}
	cifs_in_send_inc(ses->server);
	rc = smb_send_rqst(ses->server, num_rqst, rqst, flags);
	cifs_in_send_dec(ses->server);

	for (i = 0; i < num_rqst; i++)
		cifs_save_when_sent(midQ[i]);

	if (rc < 0) {
		revert_current_mid(ses->server, num_rqst);
		ses->server->sequence_number -= 2;
	}

	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0)
		goto out;

	/*
	 * Compounding is never used during session establish.
	 */
	if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP))
		smb311_update_preauth_hash(ses, rqst[0].rq_iov,
					   rqst[0].rq_nvec);

	if (timeout == CIFS_ASYNC_OP)
		goto out;

	for (i = 0; i < num_rqst; i++) {
		rc = wait_for_response(ses->server, midQ[i]);
		if (rc != 0) {
			cifs_dbg(FYI, "Cancelling wait for mid %llu\n",
				 midQ[i]->mid);
			send_cancel(ses->server, &rqst[i], midQ[i]);
			spin_lock(&GlobalMid_Lock);
			if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
				midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
				midQ[i]->callback = DeleteMidQEntry;
				cancelled_mid[i] = true;
			}
			spin_unlock(&GlobalMid_Lock);
		}
	}

	for (i = 0; i < num_rqst; i++)
		if (!cancelled_mid[i] && midQ[i]->resp_buf
		    && (midQ[i]->mid_state == MID_RESPONSE_RECEIVED))
			credits[i] = ses->server->ops->get_credits(midQ[i]);

	for (i = 0; i < num_rqst; i++) {
		if (rc < 0)
			goto out;

		rc = cifs_sync_mid_result(midQ[i], ses->server);
		if (rc != 0) {
			/* mark this mid as cancelled to not free it below */
			cancelled_mid[i] = true;
			goto out;
		}

		if (!midQ[i]->resp_buf ||
		    midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
			rc = -EIO;
			cifs_dbg(FYI, "Bad MID state?\n");
			goto out;
		}

		buf = (char *)midQ[i]->resp_buf;
		resp_iov[i].iov_base = buf;
		resp_iov[i].iov_len = midQ[i]->resp_buf_size +
			ses->server->vals->header_preamble_size;

		if (midQ[i]->large_buf)
			resp_buf_type[i] = CIFS_LARGE_BUFFER;
		else
			resp_buf_type[i] = CIFS_SMALL_BUFFER;

		rc = ses->server->ops->check_receive(midQ[i], ses->server,
						     flags & CIFS_LOG_ERROR);

		/* mark it so buf will not be freed by cifs_delete_mid */
		if ((flags & CIFS_NO_RESP) == 0)
			midQ[i]->resp_buf = NULL;

	}

	/*
	 * Compounding is never used during session establish.
	 */
	if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
		struct kvec iov = {
			.iov_base = resp_iov[0].iov_base,
			.iov_len = resp_iov[0].iov_len
		};
		smb311_update_preauth_hash(ses, &iov, 1);
	}

out:
	/*
	 * This will dequeue all mids. After this it is important that the
	 * demultiplex_thread will not process any of these mids any further.
	 * This is prevented above by using a noop callback that will not
	 * wake this thread except for the very last PDU.
	 */
	for (i = 0; i < num_rqst; i++) {
		if (!cancelled_mid[i])
			cifs_delete_mid(midQ[i]);
		add_credits(ses->server, credits[i], optype);
	}

	return rc;
}

int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
	       struct smb_rqst *rqst, int *resp_buf_type, const int flags,
	       struct kvec *resp_iov)
{
	return compound_send_recv(xid, ses, flags, 1, rqst, resp_buf_type,
				  resp_iov);
}

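/*
 * Send/receive on a caller-supplied iov array: a 4-byte RFC1001 length
 * slot is prepended (allocating a larger iov array on the heap when the
 * on-stack one is too small) before handing off to cifs_send_recv().
 */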
int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
	     struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
	     const int flags, struct kvec *resp_iov)
{
	struct smb_rqst rqst;
	struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
	int rc;

	if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
		new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
					GFP_KERNEL);
		if (!new_iov) {
			/* otherwise cifs_send_recv below sets resp_buf_type */
			*resp_buf_type = CIFS_NO_BUFFER;
			return -ENOMEM;
		}
	} else
		new_iov = s_iov;

	/* 1st iov is a RFC1001 length followed by the rest of the packet */
	memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));

	new_iov[0].iov_base = new_iov[1].iov_base;
	new_iov[0].iov_len = 4;
	new_iov[1].iov_base += 4;
	new_iov[1].iov_len -= 4;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = new_iov;
	rqst.rq_nvec = n_vec + 1;

	rc = cifs_send_recv(xid, ses, &rqst, resp_buf_type, flags, resp_iov);
	if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
		kfree(new_iov);
	return rc;
}

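/*
 * Synchronous send/receive for a single pre-built SMB1 buffer: sign and
 * send @in_buf, wait for the response and copy it into @out_buf,
 * returning the RFC1001 length of the response in @pbytes_returned.
 */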
int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned, const int timeout)
{
	int rc = 0;
	struct mid_q_entry *midQ;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };

	if (ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	if (ses->server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
			 len);
		return -EIO;
	}

	rc = wait_for_free_request(ses->server, timeout, 0);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		/* Update # of requests on wire to server */
		add_credits(ses->server, 1, 0);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		goto out;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;

	cifs_in_send_inc(ses->server);
	rc = smb_send(ses->server, in_buf, len);
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		ses->server->sequence_number -= 2;

	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0)
		goto out;

	if (timeout == CIFS_ASYNC_OP)
		goto out;

	rc = wait_for_response(ses->server, midQ);
	if (rc != 0) {
		send_cancel(ses->server, &rqst, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			/* no longer considered to be "in-flight" */
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			add_credits(ses->server, 1, 0);
			return rc;
		}
		spin_unlock(&GlobalMid_Lock);
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0) {
		add_credits(ses->server, 1, 0);
		return rc;
	}

	if (!midQ->resp_buf || !out_buf ||
	    midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, ses->server, 0);
out:
	cifs_delete_mid(midQ);
	add_credits(ses->server, 1, 0);

	return rc;
}

/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
   blocking lock to return. */

static int
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
		 struct smb_hdr *in_buf,
		 struct smb_hdr *out_buf)
{
	int bytes_returned;
	struct cifs_ses *ses = tcon->ses;
	LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;

	/* We just modify the current in_buf to change
	   the type of lock from LOCKING_ANDX_SHARED_LOCK
	   or LOCKING_ANDX_EXCLUSIVE_LOCK to
	   LOCKING_ANDX_CANCEL_LOCK. */

	pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
	pSMB->Timeout = 0;
	pSMB->hdr.Mid = get_next_mid(ses->server);

	return SendReceive(xid, ses, in_buf, out_buf,
			   &bytes_returned, 0);
}

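/*
 * Variant of SendReceive() for blocking lock requests.  No credit is
 * consumed while the request blocks on the server, the wait is
 * interruptible, and on a signal the lock is cancelled (NT_CANCEL for
 * POSIX locks, LOCKINGX_CANCEL_LOCK for Windows locks) so that the
 * system call can be restarted once the response arrives.
 */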
int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf, struct smb_hdr *out_buf,
			int *pbytes_returned)
{
	int rc = 0;
	int rstart = 0;
	struct mid_q_entry *midQ;
	struct cifs_ses *ses;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };

	if (tcon == NULL || tcon->ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	ses = tcon->ses;

	if (ses->server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
			 len);
		return -EIO;
	}

	rc = wait_for_free_request(ses->server, CIFS_BLOCKING_OP, 0);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
	if (rc) {
		cifs_delete_mid(midQ);
		mutex_unlock(&ses->server->srv_mutex);
		return rc;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(ses->server);
	rc = smb_send(ses->server, in_buf, len);
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		ses->server->sequence_number -= 2;

	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0) {
		cifs_delete_mid(midQ);
		return rc;
	}

	/* Wait for a reply - allow signals to interrupt. */
	rc = wait_event_interruptible(ses->server->response_q,
		(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
		((ses->server->tcpStatus != CifsGood) &&
		 (ses->server->tcpStatus != CifsNew)));

	/* Were we interrupted by a signal ? */
	if ((rc == -ERESTARTSYS) &&
	    (midQ->mid_state == MID_REQUEST_SUBMITTED) &&
	    ((ses->server->tcpStatus == CifsGood) ||
	     (ses->server->tcpStatus == CifsNew))) {

		if (in_buf->Command == SMB_COM_TRANSACTION2) {
			/* POSIX lock. We send a NT_CANCEL SMB to cause the
			   blocking lock to return. */
			rc = send_cancel(ses->server, &rqst, midQ);
			if (rc) {
				cifs_delete_mid(midQ);
				return rc;
			}
		} else {
			/* Windows lock. We send a LOCKINGX_CANCEL_LOCK
			   to cause the blocking lock to return. */

			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

			/* If we get -ENOLCK back the lock may have
			   already been removed. Don't exit in this case. */
			if (rc && rc != -ENOLCK) {
				cifs_delete_mid(midQ);
				return rc;
			}
		}

		rc = wait_for_response(ses->server, midQ);
		if (rc) {
			send_cancel(ses->server, &rqst, midQ);
			spin_lock(&GlobalMid_Lock);
			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
				/* no longer considered to be "in-flight" */
				midQ->callback = DeleteMidQEntry;
				spin_unlock(&GlobalMid_Lock);
				return rc;
			}
			spin_unlock(&GlobalMid_Lock);
		}

		/* We got the response - restart system call. */
		rstart = 1;
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0)
		return rc;

	/* rcvd frame is ok */
	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, ses->server, 0);
out:
	cifs_delete_mid(midQ);
	if (rstart && rc == -EACCES)
		return -ERESTARTSYS;
	return rc;
}