/*
 * fs/cifs/misc.c
 *
 * Copyright (C) International Business Machines Corp., 2002,2008
 * Author(s): Steve French (sfrench@us.ibm.com)
 *
 * This library is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/mempool.h>
#include <linux/vmalloc.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smberr.h"
#include "nterr.h"
#include "cifs_unicode.h"
#include "smb2pdu.h"

extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;

/* The xid serves as a useful identifier for each incoming vfs request,
   in a similar way to the mid which is useful to track each sent smb,
   and CurrentXid can also provide a running counter (although it
   will eventually wrap past zero) of the total vfs operations handled
   since the cifs fs was mounted */

unsigned int
_get_xid(void)
{
        unsigned int xid;

        spin_lock(&GlobalMid_Lock);
        GlobalTotalActiveXid++;

        /* keep high water mark for number of simultaneous ops in filesystem */
        if (GlobalTotalActiveXid > GlobalMaxActiveXid)
                GlobalMaxActiveXid = GlobalTotalActiveXid;
        if (GlobalTotalActiveXid > 65000)
                cifs_dbg(FYI, "warning: more than 65000 requests active\n");
        xid = GlobalCurrentXid++;
        spin_unlock(&GlobalMid_Lock);
        return xid;
}

void
_free_xid(unsigned int xid)
{
        spin_lock(&GlobalMid_Lock);
        /* if (GlobalTotalActiveXid == 0)
                BUG(); */
        GlobalTotalActiveXid--;
        spin_unlock(&GlobalMid_Lock);
}
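
/*
 * Usage sketch (illustrative only, not part of this file): callers do not
 * normally invoke _get_xid()/_free_xid() directly; they are assumed to be
 * wrapped by the get_xid()/free_xid() helpers in cifsproto.h, which add
 * debug tracing. A typical VFS entry point pairs them like this:
 *
 *      unsigned int xid = get_xid();
 *
 *      rc = CIFSSMBQPathInfo(xid, tcon, ...);  // any SMB operation
 *      free_xid(xid);
 *      return rc;
 */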

struct cifs_ses *
sesInfoAlloc(void)
{
        struct cifs_ses *ret_buf;

        ret_buf = kzalloc(sizeof(struct cifs_ses), GFP_KERNEL);
        if (ret_buf) {
                atomic_inc(&sesInfoAllocCount);
                ret_buf->status = CifsNew;
                ++ret_buf->ses_count;
                INIT_LIST_HEAD(&ret_buf->smb_ses_list);
                INIT_LIST_HEAD(&ret_buf->tcon_list);
                mutex_init(&ret_buf->session_mutex);
                spin_lock_init(&ret_buf->iface_lock);
        }
        return ret_buf;
}

void
sesInfoFree(struct cifs_ses *buf_to_free)
{
        if (buf_to_free == NULL) {
                cifs_dbg(FYI, "Null buffer passed to sesInfoFree\n");
                return;
        }

        atomic_dec(&sesInfoAllocCount);
        kfree(buf_to_free->serverOS);
        kfree(buf_to_free->serverDomain);
        kfree(buf_to_free->serverNOS);
        kzfree(buf_to_free->password);
        kfree(buf_to_free->user_name);
        kfree(buf_to_free->domainName);
        kzfree(buf_to_free->auth_key.response);
        kfree(buf_to_free->iface_list);
        kzfree(buf_to_free);
}

struct cifs_tcon *
tconInfoAlloc(void)
{
        struct cifs_tcon *ret_buf;
        ret_buf = kzalloc(sizeof(struct cifs_tcon), GFP_KERNEL);
        if (ret_buf) {
                atomic_inc(&tconInfoAllocCount);
                ret_buf->tidStatus = CifsNew;
                ++ret_buf->tc_count;
                INIT_LIST_HEAD(&ret_buf->openFileList);
                INIT_LIST_HEAD(&ret_buf->tcon_list);
                spin_lock_init(&ret_buf->open_file_lock);
                mutex_init(&ret_buf->crfid.fid_mutex);
                ret_buf->crfid.fid = kzalloc(sizeof(struct cifs_fid),
                                             GFP_KERNEL);
                spin_lock_init(&ret_buf->stat_lock);
        }
        return ret_buf;
}

void
tconInfoFree(struct cifs_tcon *buf_to_free)
{
        if (buf_to_free == NULL) {
                cifs_dbg(FYI, "Null buffer passed to tconInfoFree\n");
                return;
        }
        atomic_dec(&tconInfoAllocCount);
        kfree(buf_to_free->nativeFileSystem);
        kzfree(buf_to_free->password);
        kfree(buf_to_free->crfid.fid);
        kfree(buf_to_free);
}

struct smb_hdr *
cifs_buf_get(void)
{
        struct smb_hdr *ret_buf = NULL;
        /*
         * The SMB2 header is bigger than the CIFS one, so it does no harm
         * to clear a few extra bytes for CIFS.
         */
        size_t buf_size = sizeof(struct smb2_sync_hdr);

        /*
         * We could use the negotiated size instead of max_msgsize, but it
         * may be more efficient to always allocate the same size; it is
         * only slightly larger than necessary, and maxbuffersize defaults
         * to this value and cannot be bigger.
         */
        ret_buf = mempool_alloc(cifs_req_poolp, GFP_NOFS);

        /* clear the first few header bytes */
        /* for most paths, more is cleared in header_assemble */
        memset(ret_buf, 0, buf_size + 3);
        atomic_inc(&bufAllocCount);
#ifdef CONFIG_CIFS_STATS2
        atomic_inc(&totBufAllocCount);
#endif /* CONFIG_CIFS_STATS2 */

        return ret_buf;
}

void
cifs_buf_release(void *buf_to_free)
{
        if (buf_to_free == NULL) {
                /* cifs_dbg(FYI, "Null buffer passed to cifs_buf_release\n");*/
                return;
        }
        mempool_free(buf_to_free, cifs_req_poolp);

        atomic_dec(&bufAllocCount);
        return;
}

struct smb_hdr *
cifs_small_buf_get(void)
{
        struct smb_hdr *ret_buf = NULL;

/* We could use the negotiated size instead of max_msgsize, but it may be
   more efficient to always allocate the same size; it is only slightly
   larger than necessary, and maxbuffersize defaults to this value and
   cannot be bigger */
        ret_buf = mempool_alloc(cifs_sm_req_poolp, GFP_NOFS);
        /* No need to clear memory here, cleared in header assemble */
        /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
        atomic_inc(&smBufAllocCount);
#ifdef CONFIG_CIFS_STATS2
        atomic_inc(&totSmBufAllocCount);
#endif /* CONFIG_CIFS_STATS2 */

        return ret_buf;
}

void
cifs_small_buf_release(void *buf_to_free)
{

        if (buf_to_free == NULL) {
                cifs_dbg(FYI, "Null buffer passed to cifs_small_buf_release\n");
                return;
        }
        mempool_free(buf_to_free, cifs_sm_req_poolp);

        atomic_dec(&smBufAllocCount);
        return;
}

void
free_rsp_buf(int resp_buftype, void *rsp)
{
        if (resp_buftype == CIFS_SMALL_BUFFER)
                cifs_small_buf_release(rsp);
        else if (resp_buftype == CIFS_LARGE_BUFFER)
                cifs_buf_release(rsp);
}
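
/*
 * Usage sketch (illustrative only): response buffer types come from
 * cifsglob.h (CIFS_NO_BUFFER, CIFS_SMALL_BUFFER, CIFS_LARGE_BUFFER).
 * A typical caller is assumed to initialize the type to CIFS_NO_BUFFER,
 * let the transport fill it in, and then free unconditionally:
 *
 *      int resp_buftype = CIFS_NO_BUFFER;
 *      struct kvec rsp_iov;
 *
 *      rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
 *      // ... use the response ...
 *      free_rsp_buf(resp_buftype, rsp_iov.iov_base);
 */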

/* NB: MID can not be set if treeCon not passed in - in that case it is
   the responsibility of the caller to set the mid */
void
header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
                const struct cifs_tcon *treeCon, int word_count
                /* length of fixed section (word count) in two byte units */)
{
        char *temp = (char *) buffer;

        memset(temp, 0, 256); /* bigger than MAX_CIFS_HDR_SIZE */

        buffer->smb_buf_length = cpu_to_be32(
            (2 * word_count) + sizeof(struct smb_hdr) -
            4 /* RFC 1001 length field does not count */ +
            2 /* for bcc field itself */) ;

        buffer->Protocol[0] = 0xFF;
        buffer->Protocol[1] = 'S';
        buffer->Protocol[2] = 'M';
        buffer->Protocol[3] = 'B';
        buffer->Command = smb_command;
        buffer->Flags = 0x00;   /* case sensitive */
        buffer->Flags2 = SMBFLG2_KNOWS_LONG_NAMES;
        buffer->Pid = cpu_to_le16((__u16)current->tgid);
        buffer->PidHigh = cpu_to_le16((__u16)(current->tgid >> 16));
        if (treeCon) {
                buffer->Tid = treeCon->tid;
                if (treeCon->ses) {
                        if (treeCon->ses->capabilities & CAP_UNICODE)
                                buffer->Flags2 |= SMBFLG2_UNICODE;
                        if (treeCon->ses->capabilities & CAP_STATUS32)
                                buffer->Flags2 |= SMBFLG2_ERR_STATUS;

                        /* Uid is not converted */
                        buffer->Uid = treeCon->ses->Suid;
                        buffer->Mid = get_next_mid(treeCon->ses->server);
                }
                if (treeCon->Flags & SMB_SHARE_IS_IN_DFS)
                        buffer->Flags2 |= SMBFLG2_DFS;
                if (treeCon->nocase)
                        buffer->Flags |= SMBFLG_CASELESS;
                if ((treeCon->ses) && (treeCon->ses->server))
                        if (treeCon->ses->server->sign)
                                buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
        }

/* endian conversion of flags is now done just before sending */
        buffer->WordCount = (char) word_count;
        return;
}

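/*
 * Usage sketch (illustrative only): the SMB1 request-building helpers are
 * assumed to pair one of the buffer allocators above with header_assemble(),
 * roughly along these lines (local names are hypothetical):
 *
 *      struct smb_hdr *buf = cifs_small_buf_get();
 *
 *      header_assemble(buf, SMB_COM_LOGOFF_ANDX, tcon, wct);
 *      // ... fill in the parameter words and byte count, then send ...
 */
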
static int
check_smb_hdr(struct smb_hdr *smb)
{
        /* does it have the right SMB "signature" ? */
        if (*(__le32 *) smb->Protocol != cpu_to_le32(0x424d53ff)) {
                cifs_dbg(VFS, "Bad protocol string signature header 0x%x\n",
                         *(unsigned int *)smb->Protocol);
                return 1;
        }

        /* if it's a response then accept */
        if (smb->Flags & SMBFLG_RESPONSE)
                return 0;

        /* only one valid case where server sends us request */
        if (smb->Command == SMB_COM_LOCKING_ANDX)
                return 0;

        cifs_dbg(VFS, "Server sent request, not response. mid=%u\n",
                 get_mid(smb));
        return 1;
}

int
checkSMB(char *buf, unsigned int total_read, struct TCP_Server_Info *server)
{
        struct smb_hdr *smb = (struct smb_hdr *)buf;
        __u32 rfclen = be32_to_cpu(smb->smb_buf_length);
        __u32 clc_len;  /* calculated length */
        cifs_dbg(FYI, "checkSMB Length: 0x%x, smb_buf_length: 0x%x\n",
                 total_read, rfclen);

        /* is this frame too small to even get to a BCC? */
        if (total_read < 2 + sizeof(struct smb_hdr)) {
                if ((total_read >= sizeof(struct smb_hdr) - 1)
                            && (smb->Status.CifsError != 0)) {
                        /* it's an error return */
                        smb->WordCount = 0;
                        /* some error cases do not return wct and bcc */
                        return 0;
                } else if ((total_read == sizeof(struct smb_hdr) + 1) &&
                                (smb->WordCount == 0)) {
                        char *tmp = (char *)smb;
                        /* Need to work around a bug in two servers here */
                        /* First, check if the part of bcc they sent was zero */
                        if (tmp[sizeof(struct smb_hdr)] == 0) {
                                /* some servers return only half of bcc
                                 * on simple responses (wct, bcc both zero)
                                 * in particular have seen this on
                                 * ulogoffX and FindClose. This leaves
                                 * one byte of bcc potentially uninitialized
                                 */
                                /* zero rest of bcc */
                                tmp[sizeof(struct smb_hdr)+1] = 0;
                                return 0;
                        }
                        cifs_dbg(VFS, "rcvd invalid byte count (bcc)\n");
                } else {
                        cifs_dbg(VFS, "Length less than smb header size\n");
                }
                return -EIO;
        }

        /* otherwise, there is enough to get to the BCC */
        if (check_smb_hdr(smb))
                return -EIO;
        clc_len = smbCalcSize(smb, server);

        if (4 + rfclen != total_read) {
                cifs_dbg(VFS, "Length read does not match RFC1001 length %d\n",
                         rfclen);
                return -EIO;
        }

        if (4 + rfclen != clc_len) {
                __u16 mid = get_mid(smb);
                /* check if bcc wrapped around for large read responses */
                if ((rfclen > 64 * 1024) && (rfclen > clc_len)) {
                        /* check if lengths match mod 64K */
                        if (((4 + rfclen) & 0xFFFF) == (clc_len & 0xFFFF))
                                return 0; /* bcc wrapped */
                }
                cifs_dbg(FYI, "Calculated size %u vs length %u mismatch for mid=%u\n",
                         clc_len, 4 + rfclen, mid);

                if (4 + rfclen < clc_len) {
                        cifs_dbg(VFS, "RFC1001 size %u smaller than SMB for mid=%u\n",
                                 rfclen, mid);
                        return -EIO;
                } else if (rfclen > clc_len + 512) {
                        /*
                         * Some servers (Windows XP in particular) send more
                         * data than the lengths in the SMB packet would
                         * indicate on certain calls (byte range locks and
                         * trans2 find first calls in particular). While the
                         * client can handle such a frame by ignoring the
                         * trailing data, we choose to limit the amount of
                         * extra data to 512 bytes.
                         */
                        cifs_dbg(VFS, "RFC1001 size %u more than 512 bytes larger than SMB for mid=%u\n",
                                 rfclen, mid);
                        return -EIO;
                }
        }
        return 0;
}

bool
is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
{
        struct smb_hdr *buf = (struct smb_hdr *)buffer;
        struct smb_com_lock_req *pSMB = (struct smb_com_lock_req *)buf;
        struct list_head *tmp, *tmp1, *tmp2;
        struct cifs_ses *ses;
        struct cifs_tcon *tcon;
        struct cifsInodeInfo *pCifsInode;
        struct cifsFileInfo *netfile;

        cifs_dbg(FYI, "Checking for oplock break or dnotify response\n");
        if ((pSMB->hdr.Command == SMB_COM_NT_TRANSACT) &&
            (pSMB->hdr.Flags & SMBFLG_RESPONSE)) {
                struct smb_com_transaction_change_notify_rsp *pSMBr =
                        (struct smb_com_transaction_change_notify_rsp *)buf;
                struct file_notify_information *pnotify;
                __u32 data_offset = 0;
                size_t len = srv->total_read - sizeof(pSMBr->hdr.smb_buf_length);

                if (get_bcc(buf) > sizeof(struct file_notify_information)) {
                        data_offset = le32_to_cpu(pSMBr->DataOffset);

                        if (data_offset >
                            len - sizeof(struct file_notify_information)) {
                                cifs_dbg(FYI, "invalid data_offset %u\n",
                                         data_offset);
                                return true;
                        }
                        pnotify = (struct file_notify_information *)
                                ((char *)&pSMBr->hdr.Protocol + data_offset);
                        cifs_dbg(FYI, "dnotify on %s Action: 0x%x\n",
                                 pnotify->FileName, pnotify->Action);
                        /* cifs_dump_mem("Rcvd notify Data: ", buf,
                                sizeof(struct smb_hdr)+60); */
                        return true;
                }
                if (pSMBr->hdr.Status.CifsError) {
                        cifs_dbg(FYI, "notify err 0x%x\n",
                                 pSMBr->hdr.Status.CifsError);
                        return true;
                }
                return false;
        }
        if (pSMB->hdr.Command != SMB_COM_LOCKING_ANDX)
                return false;
        if (pSMB->hdr.Flags & SMBFLG_RESPONSE) {
                /* no sense logging error on invalid handle on oplock
                   break - harmless race between close request and oplock
                   break response is expected from time to time writing out
                   large dirty files cached on the client */
                if ((NT_STATUS_INVALID_HANDLE) ==
                    le32_to_cpu(pSMB->hdr.Status.CifsError)) {
                        cifs_dbg(FYI, "invalid handle on oplock break\n");
                        return true;
                } else if (ERRbadfid ==
                    le16_to_cpu(pSMB->hdr.Status.DosError.Error)) {
                        return true;
                } else {
                        return false; /* on valid oplock brk we get "request" */
                }
        }
        if (pSMB->hdr.WordCount != 8)
                return false;

        cifs_dbg(FYI, "oplock type 0x%x level 0x%x\n",
                 pSMB->LockType, pSMB->OplockLevel);
        if (!(pSMB->LockType & LOCKING_ANDX_OPLOCK_RELEASE))
                return false;

        /* look up tcon based on tid & uid */
        spin_lock(&cifs_tcp_ses_lock);
        list_for_each(tmp, &srv->smb_ses_list) {
                ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
                list_for_each(tmp1, &ses->tcon_list) {
                        tcon = list_entry(tmp1, struct cifs_tcon, tcon_list);
                        if (tcon->tid != buf->Tid)
                                continue;

                        cifs_stats_inc(&tcon->stats.cifs_stats.num_oplock_brks);
                        spin_lock(&tcon->open_file_lock);
                        list_for_each(tmp2, &tcon->openFileList) {
                                netfile = list_entry(tmp2, struct cifsFileInfo,
                                                     tlist);
                                if (pSMB->Fid != netfile->fid.netfid)
                                        continue;

                                cifs_dbg(FYI, "file id match, oplock break\n");
                                pCifsInode = CIFS_I(d_inode(netfile->dentry));

                                set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK,
                                        &pCifsInode->flags);

                                /*
                                 * Set flag if the server downgrades the oplock
                                 * to L2 else clear.
                                 */
                                if (pSMB->OplockLevel)
                                        set_bit(
                                           CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
                                           &pCifsInode->flags);
                                else
                                        clear_bit(
                                           CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
                                           &pCifsInode->flags);

                                cifs_queue_oplock_break(netfile);
                                netfile->oplock_break_cancelled = false;

                                spin_unlock(&tcon->open_file_lock);
                                spin_unlock(&cifs_tcp_ses_lock);
                                return true;
                        }
                        spin_unlock(&tcon->open_file_lock);
                        spin_unlock(&cifs_tcp_ses_lock);
                        cifs_dbg(FYI, "No matching file for oplock break\n");
                        return true;
                }
        }
        spin_unlock(&cifs_tcp_ses_lock);
        cifs_dbg(FYI, "Can not process oplock break for non-existent connection\n");
        return true;
}

void
dump_smb(void *buf, int smb_buf_length)
{
        if (traceSMB == 0)
                return;

        print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE, 8, 2, buf,
                       smb_buf_length, true);
}

void
cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb)
{
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
                cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM;
                cifs_sb->mnt_cifs_serverino_autodisabled = true;
                cifs_dbg(VFS, "Autodisabling the use of server inode numbers on %s. This server doesn't seem to support them properly. Hardlinks will not be recognized on this mount. Consider mounting with the \"noserverino\" option to silence this message.\n",
                         cifs_sb_master_tcon(cifs_sb)->treeName);
        }
}

void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock)
{
        oplock &= 0xF;

        if (oplock == OPLOCK_EXCLUSIVE) {
                cinode->oplock = CIFS_CACHE_WRITE_FLG | CIFS_CACHE_READ_FLG;
                cifs_dbg(FYI, "Exclusive Oplock granted on inode %p\n",
                         &cinode->vfs_inode);
        } else if (oplock == OPLOCK_READ) {
                cinode->oplock = CIFS_CACHE_READ_FLG;
                cifs_dbg(FYI, "Level II Oplock granted on inode %p\n",
                         &cinode->vfs_inode);
        } else
                cinode->oplock = 0;
}

/*
 * We wait for oplock breaks to be processed before we attempt to perform
 * writes.
 */
int cifs_get_writer(struct cifsInodeInfo *cinode)
{
        int rc;

start:
        rc = wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_OPLOCK_BREAK,
                         TASK_KILLABLE);
        if (rc)
                return rc;

        spin_lock(&cinode->writers_lock);
        if (!cinode->writers)
                set_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
        cinode->writers++;
        /* Check to see if we have started servicing an oplock break */
        if (test_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags)) {
                cinode->writers--;
                if (cinode->writers == 0) {
                        clear_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
                        wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS);
                }
                spin_unlock(&cinode->writers_lock);
                goto start;
        }
        spin_unlock(&cinode->writers_lock);
        return 0;
}

void cifs_put_writer(struct cifsInodeInfo *cinode)
{
        spin_lock(&cinode->writers_lock);
        cinode->writers--;
        if (cinode->writers == 0) {
                clear_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
                wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS);
        }
        spin_unlock(&cinode->writers_lock);
}
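
/*
 * Usage sketch (illustrative only): write paths are assumed to bracket the
 * actual data writes with this pair so that writes never race with an
 * in-progress oplock break, e.g.:
 *
 *      rc = cifs_get_writer(CIFS_I(inode));
 *      if (rc)
 *              return rc;
 *      // ... issue the write(s) to the server ...
 *      cifs_put_writer(CIFS_I(inode));
 */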

/**
 * cifs_queue_oplock_break - queue the oplock break handler for cfile
 *
 * This function is called from the demultiplex thread when it
 * receives an oplock break for @cfile.
 *
 * Assumes the tcon->open_file_lock is held.
 * Assumes cfile->file_info_lock is NOT held.
 */
void cifs_queue_oplock_break(struct cifsFileInfo *cfile)
{
        /*
         * Bump the handle refcount now while we hold the
         * open_file_lock to enforce the validity of it for the oplock
         * break handler. The matching put is done at the end of the
         * handler.
         */
        cifsFileInfo_get(cfile);

        queue_work(cifsoplockd_wq, &cfile->oplock_break);
}

void cifs_done_oplock_break(struct cifsInodeInfo *cinode)
{
        clear_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags);
        wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_OPLOCK_BREAK);
}

bool
backup_cred(struct cifs_sb_info *cifs_sb)
{
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID) {
                if (uid_eq(cifs_sb->mnt_backupuid, current_fsuid()))
                        return true;
        }
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID) {
                if (in_group_p(cifs_sb->mnt_backupgid))
                        return true;
        }

        return false;
}

void
cifs_del_pending_open(struct cifs_pending_open *open)
{
        spin_lock(&tlink_tcon(open->tlink)->open_file_lock);
        list_del(&open->olist);
        spin_unlock(&tlink_tcon(open->tlink)->open_file_lock);
}

void
cifs_add_pending_open_locked(struct cifs_fid *fid, struct tcon_link *tlink,
                             struct cifs_pending_open *open)
{
        memcpy(open->lease_key, fid->lease_key, SMB2_LEASE_KEY_SIZE);
        open->oplock = CIFS_OPLOCK_NO_CHANGE;
        open->tlink = tlink;
        fid->pending_open = open;
        list_add_tail(&open->olist, &tlink_tcon(tlink)->pending_opens);
}

void
cifs_add_pending_open(struct cifs_fid *fid, struct tcon_link *tlink,
                      struct cifs_pending_open *open)
{
        spin_lock(&tlink_tcon(tlink)->open_file_lock);
        cifs_add_pending_open_locked(fid, tlink, open);
        spin_unlock(&tlink_tcon(open->tlink)->open_file_lock);
}
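
/*
 * Usage sketch (illustrative only): open paths are assumed to register a
 * pending open before sending the open/create request, so a lease break
 * arriving ahead of the response can still be matched, and to remove it
 * once the open completes (or fails), roughly:
 *
 *      struct cifs_pending_open open;
 *
 *      cifs_add_pending_open(&fid, tlink, &open);
 *      rc = server->ops->open(xid, &oparms, &oplock, buf);
 *      // ... record the handle or handle the error ...
 *      cifs_del_pending_open(&open);
 */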

/* Parses a DFS referral V3 structure.
 * Caller is responsible for freeing target_nodes.
 * Returns:
 * - on success - 0
 * - on failure - errno
 */
int
parse_dfs_referrals(struct get_dfs_referral_rsp *rsp, u32 rsp_size,
                    unsigned int *num_of_nodes,
                    struct dfs_info3_param **target_nodes,
                    const struct nls_table *nls_codepage, int remap,
                    const char *searchName, bool is_unicode)
{
        int i, rc = 0;
        char *data_end;
        struct dfs_referral_level_3 *ref;

        *num_of_nodes = le16_to_cpu(rsp->NumberOfReferrals);

        if (*num_of_nodes < 1) {
                cifs_dbg(VFS, "num_referrals: must be at least > 0, but we get num_referrals = %d\n",
                         *num_of_nodes);
                rc = -EINVAL;
                goto parse_DFS_referrals_exit;
        }

        ref = (struct dfs_referral_level_3 *) &(rsp->referrals);
        if (ref->VersionNumber != cpu_to_le16(3)) {
                cifs_dbg(VFS, "Referrals of V%d version are not supported, should be V3\n",
                         le16_to_cpu(ref->VersionNumber));
                rc = -EINVAL;
                goto parse_DFS_referrals_exit;
        }

        /* get the upper boundary of the resp buffer */
        data_end = (char *)rsp + rsp_size;

        cifs_dbg(FYI, "num_referrals: %d dfs flags: 0x%x ...\n",
                 *num_of_nodes, le32_to_cpu(rsp->DFSFlags));

        *target_nodes = kcalloc(*num_of_nodes, sizeof(struct dfs_info3_param),
                                GFP_KERNEL);
        if (*target_nodes == NULL) {
                rc = -ENOMEM;
                goto parse_DFS_referrals_exit;
        }

        /* collect necessary data from referrals */
        for (i = 0; i < *num_of_nodes; i++) {
                char *temp;
                int max_len;
                struct dfs_info3_param *node = (*target_nodes)+i;

                node->flags = le32_to_cpu(rsp->DFSFlags);
                if (is_unicode) {
                        __le16 *tmp = kmalloc(strlen(searchName)*2 + 2,
                                              GFP_KERNEL);
                        if (tmp == NULL) {
                                rc = -ENOMEM;
                                goto parse_DFS_referrals_exit;
                        }
                        cifsConvertToUTF16((__le16 *) tmp, searchName,
                                           PATH_MAX, nls_codepage, remap);
                        node->path_consumed = cifs_utf16_bytes(tmp,
                                        le16_to_cpu(rsp->PathConsumed),
                                        nls_codepage);
                        kfree(tmp);
                } else
                        node->path_consumed = le16_to_cpu(rsp->PathConsumed);

                node->server_type = le16_to_cpu(ref->ServerType);
                node->ref_flag = le16_to_cpu(ref->ReferralEntryFlags);

                /* copy DfsPath */
                temp = (char *)ref + le16_to_cpu(ref->DfsPathOffset);
                max_len = data_end - temp;
                node->path_name = cifs_strndup_from_utf16(temp, max_len,
                                                          is_unicode, nls_codepage);
                if (!node->path_name) {
                        rc = -ENOMEM;
                        goto parse_DFS_referrals_exit;
                }

                /* copy link target UNC */
                temp = (char *)ref + le16_to_cpu(ref->NetworkAddressOffset);
                max_len = data_end - temp;
                node->node_name = cifs_strndup_from_utf16(temp, max_len,
                                                          is_unicode, nls_codepage);
                if (!node->node_name) {
                        rc = -ENOMEM;
                        goto parse_DFS_referrals_exit;
                }

                ref++;
        }

parse_DFS_referrals_exit:
        if (rc) {
                free_dfs_info_array(*target_nodes, *num_of_nodes);
                *target_nodes = NULL;
                *num_of_nodes = 0;
        }
        return rc;
}

struct cifs_aio_ctx *
cifs_aio_ctx_alloc(void)
{
        struct cifs_aio_ctx *ctx;

        ctx = kzalloc(sizeof(struct cifs_aio_ctx), GFP_KERNEL);
        if (!ctx)
                return NULL;

        INIT_LIST_HEAD(&ctx->list);
        mutex_init(&ctx->aio_mutex);
        init_completion(&ctx->done);
        kref_init(&ctx->refcount);
        return ctx;
}

void
cifs_aio_ctx_release(struct kref *refcount)
{
        struct cifs_aio_ctx *ctx = container_of(refcount,
                                        struct cifs_aio_ctx, refcount);

        cifsFileInfo_put(ctx->cfile);
        kvfree(ctx->bv);
        kfree(ctx);
}

#define CIFS_AIO_KMALLOC_LIMIT (1024 * 1024)

int
setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw)
{
        ssize_t rc;
        unsigned int cur_npages;
        unsigned int npages = 0;
        unsigned int i;
        size_t len;
        size_t count = iov_iter_count(iter);
        unsigned int saved_len;
        size_t start;
        unsigned int max_pages = iov_iter_npages(iter, INT_MAX);
        struct page **pages = NULL;
        struct bio_vec *bv = NULL;

        if (iter->type & ITER_KVEC) {
                memcpy(&ctx->iter, iter, sizeof(struct iov_iter));
                ctx->len = count;
                iov_iter_advance(iter, count);
                return 0;
        }

        if (max_pages * sizeof(struct bio_vec) <= CIFS_AIO_KMALLOC_LIMIT)
                bv = kmalloc_array(max_pages, sizeof(struct bio_vec),
                                   GFP_KERNEL);

        if (!bv) {
                bv = vmalloc(array_size(max_pages, sizeof(struct bio_vec)));
                if (!bv)
                        return -ENOMEM;
        }

        if (max_pages * sizeof(struct page *) <= CIFS_AIO_KMALLOC_LIMIT)
                pages = kmalloc_array(max_pages, sizeof(struct page *),
                                      GFP_KERNEL);

        if (!pages) {
                pages = vmalloc(array_size(max_pages, sizeof(struct page *)));
                if (!pages) {
                        kvfree(bv);
                        return -ENOMEM;
                }
        }

        saved_len = count;

        while (count && npages < max_pages) {
                rc = iov_iter_get_pages(iter, pages, count, max_pages, &start);
                if (rc < 0) {
                        cifs_dbg(VFS, "couldn't get user pages (rc=%zd)\n", rc);
                        break;
                }

                if (rc > count) {
                        cifs_dbg(VFS, "get pages rc=%zd more than %zu\n", rc,
                                 count);
                        break;
                }

                iov_iter_advance(iter, rc);
                count -= rc;
                rc += start;
                cur_npages = DIV_ROUND_UP(rc, PAGE_SIZE);

                if (npages + cur_npages > max_pages) {
                        cifs_dbg(VFS, "out of vec array capacity (%u vs %u)\n",
                                 npages + cur_npages, max_pages);
                        break;
                }

                for (i = 0; i < cur_npages; i++) {
                        len = rc > PAGE_SIZE ? PAGE_SIZE : rc;
                        bv[npages + i].bv_page = pages[i];
                        bv[npages + i].bv_offset = start;
                        bv[npages + i].bv_len = len - start;
                        rc -= len;
                        start = 0;
                }

                npages += cur_npages;
        }

        kvfree(pages);
        ctx->bv = bv;
        ctx->len = saved_len - count;
        ctx->npages = npages;
        iov_iter_bvec(&ctx->iter, ITER_BVEC | rw, ctx->bv, npages, ctx->len);
        return 0;
}
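
/*
 * Usage sketch (illustrative only): the direct/async read and write paths
 * are assumed to build an AIO context around the caller's iov_iter roughly
 * as follows, dropping the last reference when the request completes:
 *
 *      struct cifs_aio_ctx *ctx = cifs_aio_ctx_alloc();
 *
 *      if (!ctx)
 *              return -ENOMEM;
 *      ctx->cfile = cifsFileInfo_get(cfile);
 *      rc = setup_aio_ctx_iter(ctx, from, WRITE);
 *      // ... issue the async I/O using ctx->iter / ctx->bv ...
 *      kref_put(&ctx->refcount, cifs_aio_ctx_release);
 */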

/**
 * cifs_alloc_hash - allocate hash and hash context together
 *
 * The caller has to make sure @sdesc is initialized to either NULL or
 * a valid context. Both can be freed via cifs_free_hash().
 */
int
cifs_alloc_hash(const char *name,
                struct crypto_shash **shash, struct sdesc **sdesc)
{
        int rc = 0;
        size_t size;

        if (*sdesc != NULL)
                return 0;

        *shash = crypto_alloc_shash(name, 0, 0);
        if (IS_ERR(*shash)) {
                cifs_dbg(VFS, "could not allocate crypto %s\n", name);
                rc = PTR_ERR(*shash);
                *shash = NULL;
                *sdesc = NULL;
                return rc;
        }

        size = sizeof(struct shash_desc) + crypto_shash_descsize(*shash);
        *sdesc = kmalloc(size, GFP_KERNEL);
        if (*sdesc == NULL) {
                cifs_dbg(VFS, "no memory left to allocate crypto %s\n", name);
                crypto_free_shash(*shash);
                *shash = NULL;
                return -ENOMEM;
        }

        (*sdesc)->shash.tfm = *shash;
        (*sdesc)->shash.flags = 0x0;
        return 0;
}

/**
 * cifs_free_hash - free hash and hash context together
 *
 * Freeing a NULL hash or context is safe.
 */
void
cifs_free_hash(struct crypto_shash **shash, struct sdesc **sdesc)
{
        kfree(*sdesc);
        *sdesc = NULL;
        if (*shash)
                crypto_free_shash(*shash);
        *shash = NULL;
}
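
/*
 * Usage sketch (illustrative only): a signing or hashing path is assumed to
 * pair these helpers around the usual shash calls, for example:
 *
 *      struct crypto_shash *md5 = NULL;
 *      struct sdesc *sdescmd5 = NULL;
 *
 *      rc = cifs_alloc_hash("md5", &md5, &sdescmd5);
 *      if (rc)
 *              return rc;
 *      rc = crypto_shash_init(&sdescmd5->shash);
 *      rc = crypto_shash_update(&sdescmd5->shash, data, len);
 *      rc = crypto_shash_final(&sdescmd5->shash, out);
 *      cifs_free_hash(&md5, &sdescmd5);
 */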

/**
 * rqst_page_get_length - obtain the length and offset for a page in smb_rqst
 * Input: rqst - a smb_rqst, page - a page index for rqst
 * Output: *len - the length for this page, *offset - the offset for this page
 */
void rqst_page_get_length(struct smb_rqst *rqst, unsigned int page,
                          unsigned int *len, unsigned int *offset)
{
        *len = rqst->rq_pagesz;
        *offset = (page == 0) ? rqst->rq_offset : 0;

        if (rqst->rq_npages == 1 || page == rqst->rq_npages-1)
                *len = rqst->rq_tailsz;
        else if (page == 0)
                *len = rqst->rq_pagesz - rqst->rq_offset;
}
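
/*
 * Usage sketch (illustrative only): transport code that walks the page
 * array of a request is assumed to use this helper once per page, e.g.:
 *
 *      unsigned int i, len, offset;
 *
 *      for (i = 0; i < rqst->rq_npages; i++) {
 *              rqst_page_get_length(rqst, i, &len, &offset);
 *              // ... map rqst->rq_pages[i] and send len bytes at offset ...
 *      }
 */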
955}