// SPDX-License-Identifier: LGPL-2.1
/*
 *   Copyright (C) International Business Machines Corp., 2002,2008
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *
 *   Common Internet FileSystem (CIFS) client
 */

/* Note that BB means BUGBUG (ie something to fix eventually) */

#include <linux/module.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/seq_file.h>
#include <linux/vfs.h>
#include <linux/mempool.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/namei.h>
#include <linux/random.h>
#include <linux/uuid.h>
#include <linux/xattr.h>
#include <uapi/linux/magic.h>

#define DECLARE_GLOBALS_HERE
#include "cifsproto.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include <linux/key-type.h>
#include "cifs_spnego.h"
#ifdef CONFIG_CIFS_DFS_UPCALL
#include "dfs_cache.h"
#endif
#ifdef CONFIG_CIFS_SWN_UPCALL
#include "netlink.h"
#endif
#include "fs_context.h"
#include "cached_dir.h"

/*
 * DOS dates from 1980/1/1 through 2107/12/31
 * Protocol specifications indicate the range should be to 119, which
 * limits maximum year to 2099. But this range has not been checked.
 */
#define SMB_DATE_MAX (127<<9 | 12<<5 | 31)
#define SMB_DATE_MIN (0<<9 | 1<<5 | 1)
#define SMB_TIME_MAX (23<<11 | 59<<5 | 29)
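/*
 * DOS dates pack (year - 1980) into bits 15:9, the month into bits 8:5 and
 * the day into bits 4:0, so SMB_DATE_MAX above is 2107/12/31 (127 + 1980).
 * DOS times pack hours into bits 15:11, minutes into bits 10:5 and
 * two-second units into bits 4:0, so SMB_TIME_MAX is 23:59:58.
 */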
bool enable_oplocks = true;
bool linuxExtEnabled = true;
bool lookupCacheEnabled = true;
bool disable_legacy_dialects; /* false by default */
bool enable_gcm_256 = true;
bool require_gcm_256; /* false by default */
bool enable_negotiate_signing; /* false by default */
unsigned int global_secflags = CIFSSEC_DEF;
/* unsigned int ntlmv2_support = 0; */
unsigned int sign_CIFS_PDUs = 1;

/*
 * Global transaction id (XID) information
 */
unsigned int GlobalCurrentXid;		/* protected by GlobalMid_Lock */
unsigned int GlobalTotalActiveXid;	/* protected by GlobalMid_Lock */
unsigned int GlobalMaxActiveXid;	/* protected by GlobalMid_Lock */
spinlock_t GlobalMid_Lock; /* protects above & list operations on midQ entries */
/*
 * Global counters, updated atomically
 */
atomic_t sesInfoAllocCount;
atomic_t tconInfoAllocCount;
atomic_t tcpSesNextId;
atomic_t tcpSesAllocCount;
atomic_t tcpSesReconnectCount;
atomic_t tconInfoReconnectCount;

atomic_t buf_alloc_count;
atomic_t small_buf_alloc_count;
#ifdef CONFIG_CIFS_STATS2
atomic_t total_buf_alloc_count;
atomic_t total_small_buf_alloc_count;
#endif

struct list_head cifs_tcp_ses_list;
spinlock_t cifs_tcp_ses_lock;
static const struct super_operations cifs_super_ops;

unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
module_param(CIFSMaxBufSize, uint, 0444);
MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header) "
		 "for CIFS requests. "
		 "Default: 16384 Range: 8192 to 130048");

unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
module_param(cifs_min_rcv, uint, 0444);
MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
		 "1 to 64");

unsigned int cifs_min_small = 30;
module_param(cifs_min_small, uint, 0444);
MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
		 "Range: 2 to 256");

unsigned int cifs_max_pending = CIFS_MAX_REQ;
module_param(cifs_max_pending, uint, 0444);
MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server for "
		 "CIFS/SMB1 dialect (N/A for SMB3) "
		 "Default: 32767 Range: 2 to 32767.");

#ifdef CONFIG_CIFS_STATS2
unsigned int slow_rsp_threshold = 1;
module_param(slow_rsp_threshold, uint, 0644);
MODULE_PARM_DESC(slow_rsp_threshold, "Amount of time (in seconds) to wait "
		 "before logging that a response is delayed. "
		 "Default: 1 (if set to 0 disables msg).");
#endif /* CONFIG_CIFS_STATS2 */

module_param(enable_oplocks, bool, 0644);
MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1");

module_param(enable_gcm_256, bool, 0644);
MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encryption. Default: y/Y/1");

module_param(require_gcm_256, bool, 0644);
MODULE_PARM_DESC(require_gcm_256, "Require strongest (256 bit) GCM encryption. Default: n/N/0");

module_param(enable_negotiate_signing, bool, 0644);
MODULE_PARM_DESC(enable_negotiate_signing, "Enable negotiating packet signing algorithm with server. Default: n/N/0");

module_param(disable_legacy_dialects, bool, 0644);
MODULE_PARM_DESC(disable_legacy_dialects, "To improve security it may be "
		 "helpful to restrict the ability to "
		 "override the default dialects (SMB2.1, "
		 "SMB3 and SMB3.02) on mount with old "
		 "dialects (CIFS/SMB1 and SMB2) since "
		 "vers=1.0 (CIFS/SMB1) and vers=2.0 are weaker "
		 "and less secure. Default: n/N/0");

extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;
extern mempool_t *cifs_mid_poolp;
struct workqueue_struct *cifsiod_wq;
struct workqueue_struct *decrypt_wq;
struct workqueue_struct *fileinfo_put_wq;
struct workqueue_struct *cifsoplockd_wq;
struct workqueue_struct *deferredclose_wq;
__u32 cifs_lock_secret;

/*
 * Bumps refcount for cifs super block.
 * Note that it should only be called if a reference to the VFS super block is
 * already held, e.g. in open-type syscalls context. Otherwise it can race with
 * atomic_dec_and_test in deactivate_locked_super.
 */
cifs_sb_active(struct super_block *sb)

	struct cifs_sb_info *server = CIFS_SB(sb);

	if (atomic_inc_return(&server->active) == 1)
		atomic_inc(&sb->s_active);

cifs_sb_deactive(struct super_block *sb)

	struct cifs_sb_info *server = CIFS_SB(sb);

	if (atomic_dec_and_test(&server->active))
		deactivate_super(sb);
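/*
 * Fill in a superblock for a new cifs mount: set flags and time granularity
 * based on what the server negotiated, tune readahead, and look up the root
 * inode and dentry.
 */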
cifs_read_super(struct super_block *sb)

	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct timespec64 ts;

	cifs_sb = CIFS_SB(sb);
	tcon = cifs_sb_master_tcon(cifs_sb);

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
		sb->s_flags |= SB_POSIXACL;

	if (tcon->snapshot_time)
		sb->s_flags |= SB_RDONLY;

	if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)
		sb->s_maxbytes = MAX_LFS_FILESIZE;
	else
		sb->s_maxbytes = MAX_NON_LFS;

	/*
	 * Some very old servers like DOS and OS/2 used 2 second granularity
	 * (while all current servers use 100ns granularity - see MS-DTYP)
	 * but 1 second is the maximum allowed granularity for the VFS
	 * so for old servers set time granularity to 1 second while for
	 * everything else (current servers) set it to 100ns.
	 */
	if ((tcon->ses->server->vals->protocol_id == SMB10_PROT_ID) &&
	    ((tcon->ses->capabilities &
	      tcon->ses->server->vals->cap_nt_find) == 0) &&
		sb->s_time_gran = 1000000000; /* 1 second is max allowed gran */
		ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MIN), 0, 0);
		sb->s_time_min = ts.tv_sec;
		ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MAX),
				    cpu_to_le16(SMB_TIME_MAX), 0);
		sb->s_time_max = ts.tv_sec;

		/*
		 * Almost every server, including all SMB2+, uses DCE TIME
		 * ie 100 nanosecond units, since 1601.  See MS-DTYP and MS-FSCC
		 */
		sb->s_time_gran = 100;
		ts = cifs_NTtimeToUnix(0);
		sb->s_time_min = ts.tv_sec;
		ts = cifs_NTtimeToUnix(cpu_to_le64(S64_MAX));
		sb->s_time_max = ts.tv_sec;

	sb->s_magic = CIFS_SUPER_MAGIC;
	sb->s_op = &cifs_super_ops;
	sb->s_xattr = cifs_xattr_handlers;
	rc = super_setup_bdi(sb);

	/* tune readahead according to rsize if readahead size not set on mount */
	if (cifs_sb->ctx->rsize == 0)
		cifs_sb->ctx->rsize =
			tcon->ses->server->ops->negotiate_rsize(tcon, cifs_sb->ctx);
	if (cifs_sb->ctx->rasize)
		sb->s_bdi->ra_pages = cifs_sb->ctx->rasize / PAGE_SIZE;
	else
		sb->s_bdi->ra_pages = cifs_sb->ctx->rsize / PAGE_SIZE;

	sb->s_blocksize = CIFS_MAX_MSGSIZE;
	sb->s_blocksize_bits = 14;	/* default 2**14 = CIFS_MAX_MSGSIZE */
	inode = cifs_root_iget(sb);

	sb->s_d_op = &cifs_ci_dentry_ops;
	sb->s_d_op = &cifs_dentry_ops;

	sb->s_root = d_make_root(inode);

#ifdef CONFIG_CIFS_NFSD_EXPORT
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
		cifs_dbg(FYI, "export ops supported\n");
		sb->s_export_op = &cifs_export_ops;
#endif /* CONFIG_CIFS_NFSD_EXPORT */

	cifs_dbg(VFS, "%s: get root inode failed\n", __func__);
static void cifs_kill_sb(struct super_block *sb)

	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);

	/*
	 * We need to release all dentries for the cached directories
	 * before we kill the sb.
	 */
	close_all_cached_dirs(cifs_sb);

	/* finally release root dentry */
	cifs_sb->root = NULL;

	cifs_umount(cifs_sb);
cifs_statfs(struct dentry *dentry, struct kstatfs *buf)

	struct super_block *sb = dentry->d_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
	struct TCP_Server_Info *server = tcon->ses->server;

	if (le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength) > 0)
		buf->f_namelen =
			le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength);
	else
		buf->f_namelen = PATH_MAX;

	buf->f_fsid.val[0] = tcon->vol_serial_number;
	/* are using part of create time for more randomness, see man statfs */
	buf->f_fsid.val[1] = (int)le64_to_cpu(tcon->vol_create_time);

	buf->f_files = 0;	/* undefined */
	buf->f_ffree = 0;	/* unlimited */

	if (server->ops->queryfs)
		rc = server->ops->queryfs(xid, tcon, cifs_sb, buf);
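/*
 * fallocate is dispatched to the protocol-specific handler (for example the
 * SMB3 implementation), if the server ops table provides one.
 */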
static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len)

	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
	struct TCP_Server_Info *server = tcon->ses->server;

	if (server->ops->fallocate)
		return server->ops->fallocate(file, tcon, mode, off, len);

static int cifs_permission(struct user_namespace *mnt_userns,
			   struct inode *inode, int mask)

	struct cifs_sb_info *cifs_sb;

	cifs_sb = CIFS_SB(inode->i_sb);

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
		if ((mask & MAY_EXEC) && !execute_ok(inode))
	} else /* file mode might have been restricted at mount time
		on the client (above and beyond ACL on servers) for
		servers which do not support setting and viewing mode bits,
		so allowing client to check permissions is useful */
		return generic_permission(&init_user_ns, inode, mask);
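/*
 * Slab caches and mempools for inodes, request buffers and mid (multiplex id)
 * entries. The mempools keep a minimum number of preallocated elements so
 * that allocations can still make progress under memory pressure.
 */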
static struct kmem_cache *cifs_inode_cachep;
static struct kmem_cache *cifs_req_cachep;
static struct kmem_cache *cifs_mid_cachep;
static struct kmem_cache *cifs_sm_req_cachep;
mempool_t *cifs_sm_req_poolp;
mempool_t *cifs_req_poolp;
mempool_t *cifs_mid_poolp;

static struct inode *
cifs_alloc_inode(struct super_block *sb)

	struct cifsInodeInfo *cifs_inode;

	cifs_inode = alloc_inode_sb(sb, cifs_inode_cachep, GFP_KERNEL);

	cifs_inode->cifsAttrs = 0x20;	/* default */
	cifs_inode->time = 0;
	/*
	 * Until the file is open and we have gotten oplock info back from the
	 * server, can not assume caching of file data or metadata.
	 */
	cifs_set_oplock_level(cifs_inode, 0);
	cifs_inode->flags = 0;
	spin_lock_init(&cifs_inode->writers_lock);
	cifs_inode->writers = 0;
	cifs_inode->netfs.inode.i_blkbits = 14;  /* 2**14 = CIFS_MAX_MSGSIZE */
	cifs_inode->server_eof = 0;
	cifs_inode->uniqueid = 0;
	cifs_inode->createtime = 0;
	cifs_inode->epoch = 0;
	spin_lock_init(&cifs_inode->open_file_lock);
	generate_random_uuid(cifs_inode->lease_key);
	/*
	 * Can not set i_flags here - they get immediately overwritten to zero
	 */
	/* cifs_inode->netfs.inode.i_flags = S_NOATIME | S_NOCMTIME; */
	INIT_LIST_HEAD(&cifs_inode->openFileList);
	INIT_LIST_HEAD(&cifs_inode->llist);
	INIT_LIST_HEAD(&cifs_inode->deferred_closes);
	spin_lock_init(&cifs_inode->deferred_lock);
	return &cifs_inode->netfs.inode;

cifs_free_inode(struct inode *inode)

	kmem_cache_free(cifs_inode_cachep, CIFS_I(inode));

cifs_evict_inode(struct inode *inode)

	truncate_inode_pages_final(&inode->i_data);
	if (inode->i_state & I_PINNING_FSCACHE_WB)
		cifs_fscache_unuse_inode_cookie(inode, true);
	cifs_fscache_release_inode_cookie(inode);
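/*
 * Print the server's destination address (IPv4 or IPv6, with an optional
 * scope id) into the mount options shown in /proc/mounts.
 */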
cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)

	struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
	struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr;

	seq_puts(s, ",addr=");

	switch (server->dstaddr.ss_family) {
		seq_printf(s, "%pI4", &sa->sin_addr.s_addr);
		seq_printf(s, "%pI6", &sa6->sin6_addr.s6_addr);
		if (sa6->sin6_scope_id)
			seq_printf(s, "%%%u", sa6->sin6_scope_id);
		seq_puts(s, "(unknown)");

	seq_puts(s, ",rdma");

cifs_show_security(struct seq_file *s, struct cifs_ses *ses)

	if (ses->sectype == Unspecified) {
		if (ses->user_name == NULL)
			seq_puts(s, ",sec=none");

	seq_puts(s, ",sec=");

	switch (ses->sectype) {
		seq_puts(s, "ntlmv2");
		seq_puts(s, "ntlmssp");
		/* shouldn't ever happen */
		seq_puts(s, "unknown");

	if (ses->sectype == Kerberos)
		seq_printf(s, ",cruid=%u",
			   from_kuid_munged(&init_user_ns, ses->cred_uid));

cifs_show_cache_flavor(struct seq_file *s, struct cifs_sb_info *cifs_sb)

	seq_puts(s, ",cache=");

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
		seq_puts(s, "strict");
	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RW_CACHE)
		seq_puts(s, "singleclient"); /* assume only one client access */
	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE)
		seq_puts(s, "ro"); /* read only caching assumed */
	else
		seq_puts(s, "loose");
/*
 * cifs_show_devname() is used so we show the mount device name with correct
 * format (e.g. forward slashes vs. back slashes) in /proc/mounts
 */
static int cifs_show_devname(struct seq_file *m, struct dentry *root)

	struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
	char *devname = kstrdup(cifs_sb->ctx->source, GFP_KERNEL);

	convert_delimiter(devname, '/');
	/* escape all spaces in share names */
	seq_escape(m, devname, " \t");
/*
 * cifs_show_options() is for displaying mount options in /proc/mounts.
 * Not all settable options are displayed but most of the important
 * ones are.
 */
cifs_show_options(struct seq_file *s, struct dentry *root)

	struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
	struct sockaddr *srcaddr;
	srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;

	seq_show_option(s, "vers", tcon->ses->server->vals->version_string);
	cifs_show_security(s, tcon->ses);
	cifs_show_cache_flavor(s, cifs_sb);
		seq_puts(s, ",nolease");
	if (cifs_sb->ctx->multiuser)
		seq_puts(s, ",multiuser");
	else if (tcon->ses->user_name)
		seq_show_option(s, "username", tcon->ses->user_name);

	if (tcon->ses->domainName && tcon->ses->domainName[0] != 0)
		seq_show_option(s, "domain", tcon->ses->domainName);

	if (srcaddr->sa_family != AF_UNSPEC) {
		struct sockaddr_in *saddr4;
		struct sockaddr_in6 *saddr6;

		saddr4 = (struct sockaddr_in *)srcaddr;
		saddr6 = (struct sockaddr_in6 *)srcaddr;
		if (srcaddr->sa_family == AF_INET6)
			seq_printf(s, ",srcaddr=%pI6c",
		else if (srcaddr->sa_family == AF_INET)
			seq_printf(s, ",srcaddr=%pI4",
				   &saddr4->sin_addr.s_addr);
		else
			seq_printf(s, ",srcaddr=BAD-AF:%i",
				   (int)(srcaddr->sa_family));

	seq_printf(s, ",uid=%u",
		   from_kuid_munged(&init_user_ns, cifs_sb->ctx->linux_uid));
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
		seq_puts(s, ",forceuid");
	else
		seq_puts(s, ",noforceuid");

	seq_printf(s, ",gid=%u",
		   from_kgid_munged(&init_user_ns, cifs_sb->ctx->linux_gid));
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
		seq_puts(s, ",forcegid");
	else
		seq_puts(s, ",noforcegid");

	cifs_show_address(s, tcon->ses->server);

	seq_printf(s, ",file_mode=0%ho,dir_mode=0%ho",
		   cifs_sb->ctx->file_mode,
		   cifs_sb->ctx->dir_mode);
	if (cifs_sb->ctx->iocharset)
		seq_printf(s, ",iocharset=%s", cifs_sb->ctx->iocharset);
		seq_puts(s, ",seal");
	else if (tcon->ses->server->ignore_signature)
		seq_puts(s, ",signloosely");
		seq_puts(s, ",nocase");
		seq_puts(s, ",nodelete");
	if (cifs_sb->ctx->no_sparse)
		seq_puts(s, ",nosparse");
	if (tcon->local_lease)
		seq_puts(s, ",locallease");
		seq_puts(s, ",hard");
		seq_puts(s, ",soft");
	if (tcon->use_persistent)
		seq_puts(s, ",persistenthandles");
	else if (tcon->use_resilient)
		seq_puts(s, ",resilienthandles");
	if (tcon->posix_extensions)
		seq_puts(s, ",posix");
	else if (tcon->unix_ext)
		seq_puts(s, ",unix");
	else
		seq_puts(s, ",nounix");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS)
		seq_puts(s, ",nodfs");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
		seq_puts(s, ",posixpaths");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
		seq_puts(s, ",setuids");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
		seq_puts(s, ",idsfromsid");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
		seq_puts(s, ",serverino");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		seq_puts(s, ",rwpidforward");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL)
		seq_puts(s, ",forcemand");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
		seq_puts(s, ",nouser_xattr");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
		seq_puts(s, ",mapchars");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR)
		seq_puts(s, ",mapposix");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
		seq_puts(s, ",nobrl");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_HANDLE_CACHE)
		seq_puts(s, ",nohandlecache");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID)
		seq_puts(s, ",modefromsid");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
		seq_puts(s, ",cifsacl");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
		seq_puts(s, ",dynperm");
	if (root->d_sb->s_flags & SB_POSIXACL)
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
		seq_puts(s, ",mfsymlinks");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)
		seq_puts(s, ",nostrictsync");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
		seq_puts(s, ",noperm");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID)
		seq_printf(s, ",backupuid=%u",
			   from_kuid_munged(&init_user_ns,
					    cifs_sb->ctx->backupuid));
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID)
		seq_printf(s, ",backupgid=%u",
			   from_kgid_munged(&init_user_ns,
					    cifs_sb->ctx->backupgid));

	seq_printf(s, ",rsize=%u", cifs_sb->ctx->rsize);
	seq_printf(s, ",wsize=%u", cifs_sb->ctx->wsize);
	seq_printf(s, ",bsize=%u", cifs_sb->ctx->bsize);
	if (cifs_sb->ctx->rasize)
		seq_printf(s, ",rasize=%u", cifs_sb->ctx->rasize);
	if (tcon->ses->server->min_offload)
		seq_printf(s, ",esize=%u", tcon->ses->server->min_offload);
	seq_printf(s, ",echo_interval=%lu",
		   tcon->ses->server->echo_interval / HZ);

	/* Only display max_credits if it was overridden on mount */
	if (tcon->ses->server->max_credits != SMB2_MAX_CREDITS_AVAILABLE)
		seq_printf(s, ",max_credits=%u", tcon->ses->server->max_credits);

	if (tcon->snapshot_time)
		seq_printf(s, ",snapshot=%llu", tcon->snapshot_time);
	if (tcon->handle_timeout)
		seq_printf(s, ",handletimeout=%u", tcon->handle_timeout);

	/*
	 * Display file and directory attribute timeout in seconds.
	 * If the file and directory attribute timeouts are the same then
	 * actimeo was likely specified on mount.
	 */
	if (cifs_sb->ctx->acdirmax == cifs_sb->ctx->acregmax)
		seq_printf(s, ",actimeo=%lu", cifs_sb->ctx->acregmax / HZ);
		seq_printf(s, ",acdirmax=%lu", cifs_sb->ctx->acdirmax / HZ);
		seq_printf(s, ",acregmax=%lu", cifs_sb->ctx->acregmax / HZ);
	seq_printf(s, ",closetimeo=%lu", cifs_sb->ctx->closetimeo / HZ);

	if (tcon->ses->chan_max > 1)
		seq_printf(s, ",multichannel,max_channels=%zu",
			   tcon->ses->chan_max);

	if (tcon->use_witness)
		seq_puts(s, ",witness");
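/*
 * Forced unmount (umount -f): if this is the last mount of the share, mark
 * the tcon TID_EXITING and wake up all threads blocked waiting for server
 * responses so the unmount can make progress.
 */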
static void cifs_umount_begin(struct super_block *sb)

	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_tcon *tcon;

	tcon = cifs_sb_master_tcon(cifs_sb);

	spin_lock(&cifs_tcp_ses_lock);
	spin_lock(&tcon->tc_lock);
	if ((tcon->tc_count > 1) || (tcon->status == TID_EXITING)) {
		/* we have other mounts to same share or we have
		   already tried to force umount this and woken up
		   all waiting network requests, nothing to do */
		spin_unlock(&tcon->tc_lock);
		spin_unlock(&cifs_tcp_ses_lock);
	} else if (tcon->tc_count == 1)
		tcon->status = TID_EXITING;
	spin_unlock(&tcon->tc_lock);
	spin_unlock(&cifs_tcp_ses_lock);

	/* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
	/* cancel_notify_requests(tcon); */
	if (tcon->ses && tcon->ses->server) {
		cifs_dbg(FYI, "wake up tasks now - umount begin not complete\n");
		wake_up_all(&tcon->ses->server->request_q);
		wake_up_all(&tcon->ses->server->response_q);
		msleep(1); /* yield */
		/* we have to kick the requests once more */
		wake_up_all(&tcon->ses->server->response_q);
#ifdef CONFIG_CIFS_STATS2
static int cifs_show_stats(struct seq_file *s, struct dentry *root)

static int cifs_write_inode(struct inode *inode, struct writeback_control *wbc)

	fscache_unpin_writeback(wbc, cifs_inode_cookie(inode));

static int cifs_drop_inode(struct inode *inode)

	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);

	/* no serverino => unconditional eviction */
	return !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) ||
		generic_drop_inode(inode);

static const struct super_operations cifs_super_ops = {
	.statfs = cifs_statfs,
	.alloc_inode = cifs_alloc_inode,
	.write_inode = cifs_write_inode,
	.free_inode = cifs_free_inode,
	.drop_inode = cifs_drop_inode,
	.evict_inode = cifs_evict_inode,
/*	.show_path = cifs_show_path, */ /* Would we ever need show path? */
	.show_devname = cifs_show_devname,
/*	.delete_inode = cifs_delete_inode, */  /* Do not need above
	function unless later we add lazy close of inodes or unless the
	kernel forgets to call us with the same number of releases (closes)
	as opens */
	.show_options = cifs_show_options,
	.umount_begin = cifs_umount_begin,
#ifdef CONFIG_CIFS_STATS2
	.show_stats = cifs_show_stats,
/*
 * Get root dentry from superblock according to prefix path mount option.
 * Return dentry with refcount + 1 on success and NULL otherwise.
 */
static struct dentry *
cifs_get_root(struct smb3_fs_context *ctx, struct super_block *sb)

	struct dentry *dentry;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	char *full_path = NULL;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
		return dget(sb->s_root);

	full_path = cifs_build_path_to_root(ctx, cifs_sb,
					    cifs_sb_master_tcon(cifs_sb), 0);
	if (full_path == NULL)
		return ERR_PTR(-ENOMEM);

	cifs_dbg(FYI, "Get root dentry for %s\n", full_path);

	sep = CIFS_DIR_SEP(cifs_sb);
	dentry = dget(sb->s_root);

		struct inode *dir = d_inode(dentry);
		struct dentry *child;

		if (!S_ISDIR(dir->i_mode)) {
			dentry = ERR_PTR(-ENOTDIR);

		/* skip separators */
		while (*s && *s != sep)

		child = lookup_positive_unlocked(p, dentry, s - p);
	} while (!IS_ERR(dentry));

static int cifs_set_super(struct super_block *sb, void *data)

	struct cifs_mnt_data *mnt_data = data;

	sb->s_fs_info = mnt_data->cifs_sb;
	return set_anon_super(sb, NULL);
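/*
 * Mount entry point shared by the cifs and smb3 filesystem types: duplicate
 * the fs_context, set up the cifs superblock info, connect to the server via
 * cifs_mount(), then find or create a matching VFS superblock with sget()
 * and return the root dentry (honouring any prefix path).
 */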
cifs_smb3_do_mount(struct file_system_type *fs_type,
		   int flags, struct smb3_fs_context *old_ctx)

	struct super_block *sb = NULL;
	struct cifs_sb_info *cifs_sb = NULL;
	struct cifs_mnt_data mnt_data;

	/*
	 * Prints in Kernel / CIFS log the attempted mount operation
	 * If CIFS_DEBUG && cifs_FYI
	 */
	cifs_dbg(FYI, "Devname: %s flags: %d\n", old_ctx->UNC, flags);
	cifs_info("Attempting to mount %s\n", old_ctx->UNC);

	cifs_sb = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
	if (cifs_sb == NULL) {
		root = ERR_PTR(-ENOMEM);

	cifs_sb->ctx = kzalloc(sizeof(struct smb3_fs_context), GFP_KERNEL);
		root = ERR_PTR(-ENOMEM);

	rc = smb3_fs_context_dup(cifs_sb->ctx, old_ctx);

	rc = cifs_setup_volume_info(cifs_sb->ctx, NULL, NULL);

	rc = cifs_setup_cifs_sb(cifs_sb);

	rc = cifs_mount(cifs_sb, cifs_sb->ctx);
		if (!(flags & SB_SILENT))
			cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",

	mnt_data.ctx = cifs_sb->ctx;
	mnt_data.cifs_sb = cifs_sb;
	mnt_data.flags = flags;

	/* BB should we make this contingent on mount parm? */
	flags |= SB_NODIRATIME | SB_NOATIME;

	sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data);
		cifs_umount(cifs_sb);

		cifs_dbg(FYI, "Use existing superblock\n");
		cifs_umount(cifs_sb);

		rc = cifs_read_super(sb);

		sb->s_flags |= SB_ACTIVE;

	root = cifs_get_root(cifs_sb ? cifs_sb->ctx : old_ctx, sb);

	cifs_sb->root = dget(root);

	cifs_dbg(FYI, "dentry root is: %p\n", root);

	deactivate_locked_super(sb);

	if (!sb || IS_ERR(sb)) {	/* otherwise kill_sb will handle */
		kfree(cifs_sb->prepath);
		smb3_cleanup_fs_context(cifs_sb->ctx);
cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)

	struct inode *inode = file_inode(iocb->ki_filp);

	if (iocb->ki_flags & IOCB_DIRECT)
		return cifs_user_readv(iocb, iter);

	rc = cifs_revalidate_mapping(inode);
	return generic_file_read_iter(iocb, iter);

static ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)

	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);

	if (iocb->ki_filp->f_flags & O_DIRECT) {
		written = cifs_user_writev(iocb, from);
		if (written > 0 && CIFS_CACHE_READ(cinode)) {
			cifs_zap_mapping(inode);
				 "Set no oplock for inode=%p after a write operation\n",

	written = cifs_get_writer(cinode);

	written = generic_file_write_iter(iocb, from);

	if (CIFS_CACHE_WRITE(CIFS_I(inode)))

	rc = filemap_fdatawrite(inode->i_mapping);
		cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",

	cifs_put_writer(cinode);
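/*
 * llseek entry point: for SEEK_END/SEEK_DATA/SEEK_HOLE the cached length may
 * be stale, so dirty pages are flushed and attributes revalidated before the
 * protocol-specific llseek (if any) or generic_file_llseek() is used.
 */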
static loff_t cifs_llseek(struct file *file, loff_t offset, int whence)

	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;

	/*
	 * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
	 * the cached file length
	 */
	if (whence != SEEK_SET && whence != SEEK_CUR) {
		struct inode *inode = file_inode(file);

		/*
		 * We need to be sure that all dirty pages are written and the
		 * server has the newest file length.
		 */
		if (!CIFS_CACHE_READ(CIFS_I(inode)) && inode->i_mapping &&
		    inode->i_mapping->nrpages != 0) {
			rc = filemap_fdatawait(inode->i_mapping);
				mapping_set_error(inode->i_mapping, rc);

		/*
		 * Some applications poll for the file length in this strange
		 * way so we must seek to end on non-oplocked files by
		 * setting the revalidate time to zero.
		 */
		CIFS_I(inode)->time = 0;

		rc = cifs_revalidate_file_attr(file);

	if (cfile && cfile->tlink) {
		tcon = tlink_tcon(cfile->tlink);
		if (tcon->ses->server->ops->llseek)
			return tcon->ses->server->ops->llseek(file, tcon,

	return generic_file_llseek(file, offset, whence);
cifs_setlease(struct file *file, long arg, struct file_lock **lease, void **priv)

	/*
	 * Note that this is called by vfs setlease with i_lock held to
	 * protect *lease from going away.
	 */
	struct inode *inode = file_inode(file);
	struct cifsFileInfo *cfile = file->private_data;

	if (!(S_ISREG(inode->i_mode)))

	/* Check if file is oplocked if this is request for new lease */
	if (arg == F_UNLCK ||
	    ((arg == F_RDLCK) && CIFS_CACHE_READ(CIFS_I(inode))) ||
	    ((arg == F_WRLCK) && CIFS_CACHE_WRITE(CIFS_I(inode))))
		return generic_setlease(file, arg, lease, priv);
	else if (tlink_tcon(cfile->tlink)->local_lease &&
		 !CIFS_CACHE_READ(CIFS_I(inode)))
		/*
		 * If the server claims to support oplock on this file, then we
		 * still need to check oplock even if the local_lease mount
		 * option is set, but there are servers which do not support
		 * oplock for which this mount option may be useful if the user
		 * knows that the file won't be changed on the server by anyone
		 * else.
		 */
		return generic_setlease(file, arg, lease, priv);
struct file_system_type cifs_fs_type = {
	.owner = THIS_MODULE,
	.init_fs_context = smb3_init_fs_context,
	.parameters = smb3_fs_parameters,
	.kill_sb = cifs_kill_sb,
	.fs_flags = FS_RENAME_DOES_D_MOVE,
MODULE_ALIAS_FS("cifs");

struct file_system_type smb3_fs_type = {
	.owner = THIS_MODULE,
	.init_fs_context = smb3_init_fs_context,
	.parameters = smb3_fs_parameters,
	.kill_sb = cifs_kill_sb,
	.fs_flags = FS_RENAME_DOES_D_MOVE,
MODULE_ALIAS_FS("smb3");
MODULE_ALIAS("smb3");
const struct inode_operations cifs_dir_inode_ops = {
	.create = cifs_create,
	.atomic_open = cifs_atomic_open,
	.lookup = cifs_lookup,
	.getattr = cifs_getattr,
	.unlink = cifs_unlink,
	.link = cifs_hardlink,
	.mkdir = cifs_mkdir,
	.rmdir = cifs_rmdir,
	.rename = cifs_rename2,
	.permission = cifs_permission,
	.setattr = cifs_setattr,
	.symlink = cifs_symlink,
	.mknod = cifs_mknod,
	.listxattr = cifs_listxattr,

const struct inode_operations cifs_file_inode_ops = {
	.setattr = cifs_setattr,
	.getattr = cifs_getattr,
	.permission = cifs_permission,
	.listxattr = cifs_listxattr,
	.fiemap = cifs_fiemap,

const struct inode_operations cifs_symlink_inode_ops = {
	.get_link = cifs_get_link,
	.permission = cifs_permission,
	.listxattr = cifs_listxattr,
static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
		struct file *dst_file, loff_t destoff, loff_t len,
		unsigned int remap_flags)

	struct inode *src_inode = file_inode(src_file);
	struct inode *target_inode = file_inode(dst_file);
	struct cifsFileInfo *smb_file_src = src_file->private_data;
	struct cifsFileInfo *smb_file_target;
	struct cifs_tcon *target_tcon;

	if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))

	cifs_dbg(FYI, "clone range\n");

	if (!src_file->private_data || !dst_file->private_data) {
		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");

	smb_file_target = dst_file->private_data;
	target_tcon = tlink_tcon(smb_file_target->tlink);

	/*
	 * Note: cifs case is easier than btrfs since server responsible for
	 * checks for proper open modes and file type and if it wants
	 * server could even support copy of range where source = target
	 */
	lock_two_nondirectories(target_inode, src_inode);

		len = src_inode->i_size - off;

	cifs_dbg(FYI, "about to flush pages\n");
	/* should we flush first and last page first */
	truncate_inode_pages_range(&target_inode->i_data, destoff,
				   PAGE_ALIGN(destoff + len)-1);

	if (target_tcon->ses->server->ops->duplicate_extents)
		rc = target_tcon->ses->server->ops->duplicate_extents(xid,
			smb_file_src, smb_file_target, off, len, destoff);

	/* force revalidate of size and timestamps of target file now
	   that target is updated on the server */
	CIFS_I(target_inode)->time = 0;
	/* although unlocking in the reverse order from locking is not
	   strictly necessary here it is a little cleaner to be consistent */
	unlock_two_nondirectories(src_inode, target_inode);

	return rc < 0 ? rc : len;
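/*
 * Server-side copy using the SMB2/SMB3 copychunk mechanism: both files must
 * be on the same session, the source data is flushed to the server first,
 * and the target's cached pages and timestamps are invalidated afterwards.
 */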
ssize_t cifs_file_copychunk_range(unsigned int xid,
				  struct file *src_file, loff_t off,
				  struct file *dst_file, loff_t destoff,
				  size_t len, unsigned int flags)

	struct inode *src_inode = file_inode(src_file);
	struct inode *target_inode = file_inode(dst_file);
	struct cifsFileInfo *smb_file_src;
	struct cifsFileInfo *smb_file_target;
	struct cifs_tcon *src_tcon;
	struct cifs_tcon *target_tcon;

	cifs_dbg(FYI, "copychunk range\n");

	if (!src_file->private_data || !dst_file->private_data) {
		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");

	smb_file_target = dst_file->private_data;
	smb_file_src = src_file->private_data;
	src_tcon = tlink_tcon(smb_file_src->tlink);
	target_tcon = tlink_tcon(smb_file_target->tlink);

	if (src_tcon->ses != target_tcon->ses) {
		cifs_dbg(VFS, "source and target of copy not on same server\n");

	if (!target_tcon->ses->server->ops->copychunk_range)

	/*
	 * Note: cifs case is easier than btrfs since server responsible for
	 * checks for proper open modes and file type and if it wants
	 * server could even support copy of range where source = target
	 */
	lock_two_nondirectories(target_inode, src_inode);

	cifs_dbg(FYI, "about to flush pages\n");

	rc = filemap_write_and_wait_range(src_inode->i_mapping, off,

	/* should we flush first and last page first */
	truncate_inode_pages(&target_inode->i_data, 0);

	rc = file_modified(dst_file);
		rc = target_tcon->ses->server->ops->copychunk_range(xid,
			smb_file_src, smb_file_target, off, len, destoff);

	file_accessed(src_file);

	/* force revalidate of size and timestamps of target file now
	 * that target is updated on the server
	 */
	CIFS_I(target_inode)->time = 0;
	/* although unlocking in the reverse order from locking is not
	 * strictly necessary here it is a little cleaner to be consistent
	 */
	unlock_two_nondirectories(src_inode, target_inode);
/*
 * Directory operations under CIFS/SMB2/SMB3 are synchronous, so fsync()
 * is a dummy operation.
 */
static int cifs_dir_fsync(struct file *file, loff_t start, loff_t end, int datasync)

	cifs_dbg(FYI, "Sync directory - name: %pD datasync: 0x%x\n",

static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off,
				    struct file *dst_file, loff_t destoff,
				    size_t len, unsigned int flags)

	unsigned int xid = get_xid();
	struct cifsFileInfo *cfile = dst_file->private_data;

	if (cfile->swapfile)

	rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff,

	if (rc == -EOPNOTSUPP || rc == -EXDEV)
		rc = generic_copy_file_range(src_file, off, dst_file,
					     destoff, len, flags);
const struct file_operations cifs_file_ops = {
	.read_iter = cifs_loose_read_iter,
	.write_iter = cifs_file_write_iter,
	.release = cifs_close,
	.flock = cifs_flock,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_mmap,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,

const struct file_operations cifs_file_strict_ops = {
	.read_iter = cifs_strict_readv,
	.write_iter = cifs_strict_writev,
	.release = cifs_close,
	.flock = cifs_flock,
	.fsync = cifs_strict_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_strict_mmap,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,

const struct file_operations cifs_file_direct_ops = {
	.read_iter = cifs_direct_readv,
	.write_iter = cifs_direct_writev,
	.release = cifs_close,
	.flock = cifs_flock,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_mmap,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.unlocked_ioctl = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.llseek = cifs_llseek,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,

const struct file_operations cifs_file_nobrl_ops = {
	.read_iter = cifs_loose_read_iter,
	.write_iter = cifs_file_write_iter,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_mmap,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,

const struct file_operations cifs_file_strict_nobrl_ops = {
	.read_iter = cifs_strict_readv,
	.write_iter = cifs_strict_writev,
	.release = cifs_close,
	.fsync = cifs_strict_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_strict_mmap,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,

const struct file_operations cifs_file_direct_nobrl_ops = {
	.read_iter = cifs_direct_readv,
	.write_iter = cifs_direct_writev,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_mmap,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.unlocked_ioctl = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.llseek = cifs_llseek,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,

const struct file_operations cifs_dir_ops = {
	.iterate_shared = cifs_readdir,
	.release = cifs_closedir,
	.read = generic_read_dir,
	.unlocked_ioctl = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.llseek = generic_file_llseek,
	.fsync = cifs_dir_fsync,
cifs_init_once(void *inode)

	struct cifsInodeInfo *cifsi = inode;

	inode_init_once(&cifsi->netfs.inode);
	init_rwsem(&cifsi->lock_sem);

cifs_init_inodecache(void)

	cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
					      sizeof(struct cifsInodeInfo),
					      0, (SLAB_RECLAIM_ACCOUNT|
						  SLAB_MEM_SPREAD|SLAB_ACCOUNT),
	if (cifs_inode_cachep == NULL)

cifs_destroy_inodecache(void)

	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy the cache.
	 */
	kmem_cache_destroy(cifs_inode_cachep);

cifs_init_request_bufs(void)

	/*
	 * SMB2 maximum header size is bigger than CIFS one - no problems to
	 * allocate some more bytes for CIFS.
	 */
	size_t max_hdr_size = MAX_SMB2_HDR_SIZE;

	if (CIFSMaxBufSize < 8192) {
		/* Buffer size can not be smaller than 2 * PATH_MAX since maximum
		   Unicode path name has to fit in any SMB/CIFS path based frames */
		CIFSMaxBufSize = 8192;
	} else if (CIFSMaxBufSize > 1024*127) {
		CIFSMaxBufSize = 1024 * 127;
	CIFSMaxBufSize &= 0x1FE00;	/* Round size to even 512 byte mult */

	cifs_dbg(VFS, "CIFSMaxBufSize %d 0x%x\n",
		 CIFSMaxBufSize, CIFSMaxBufSize);

	cifs_req_cachep = kmem_cache_create_usercopy("cifs_request",
					    CIFSMaxBufSize + max_hdr_size, 0,
					    SLAB_HWCACHE_ALIGN, 0,
					    CIFSMaxBufSize + max_hdr_size,
	if (cifs_req_cachep == NULL)

	if (cifs_min_rcv < 1)
	else if (cifs_min_rcv > 64) {
		cifs_dbg(VFS, "cifs_min_rcv set to maximum (64)\n");

	cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,

	if (cifs_req_poolp == NULL) {
		kmem_cache_destroy(cifs_req_cachep);

	/* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
	   almost all handle based requests (but not write response, nor is it
	   sufficient for path based requests). A smaller size would have
	   been more efficient (compacting multiple slab items on one 4k page)
	   for the case in which debug was on, but this larger size allows
	   more SMBs to use small buffer alloc and is still much more
	   efficient to alloc 1 per page off the slab compared to 17K (5page)
	   alloc of large cifs buffers even when page debugging is on */
	cifs_sm_req_cachep = kmem_cache_create_usercopy("cifs_small_rq",
				MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
				0, MAX_CIFS_SMALL_BUFFER_SIZE, NULL);
	if (cifs_sm_req_cachep == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);

	if (cifs_min_small < 2)
	else if (cifs_min_small > 256) {
		cifs_min_small = 256;
		cifs_dbg(FYI, "cifs_min_small set to maximum (256)\n");

	cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
						     cifs_sm_req_cachep);

	if (cifs_sm_req_poolp == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		kmem_cache_destroy(cifs_sm_req_cachep);

cifs_destroy_request_bufs(void)

	mempool_destroy(cifs_req_poolp);
	kmem_cache_destroy(cifs_req_cachep);
	mempool_destroy(cifs_sm_req_poolp);
	kmem_cache_destroy(cifs_sm_req_cachep);
static int init_mids(void)

	cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
					    sizeof(struct mid_q_entry), 0,
					    SLAB_HWCACHE_ALIGN, NULL);
	if (cifs_mid_cachep == NULL)

	/* 3 is a reasonable minimum number of simultaneous operations */
	cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
	if (cifs_mid_poolp == NULL) {
		kmem_cache_destroy(cifs_mid_cachep);

static void destroy_mids(void)

	mempool_destroy(cifs_mid_poolp);
	kmem_cache_destroy(cifs_mid_cachep);
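/*
 * Module initialization: reset the global session list and counters, create
 * the worker workqueues, slab caches and mempools, start the optional DFS,
 * SPNEGO, witness and idmap upcalls, and finally register the "cifs" and
 * "smb3" filesystem types. Each failure unwinds everything set up before it.
 */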
	INIT_LIST_HEAD(&cifs_tcp_ses_list);

	/*
	 * Initialize Global counters
	 */
	atomic_set(&sesInfoAllocCount, 0);
	atomic_set(&tconInfoAllocCount, 0);
	atomic_set(&tcpSesNextId, 0);
	atomic_set(&tcpSesAllocCount, 0);
	atomic_set(&tcpSesReconnectCount, 0);
	atomic_set(&tconInfoReconnectCount, 0);

	atomic_set(&buf_alloc_count, 0);
	atomic_set(&small_buf_alloc_count, 0);
#ifdef CONFIG_CIFS_STATS2
	atomic_set(&total_buf_alloc_count, 0);
	atomic_set(&total_small_buf_alloc_count, 0);
	if (slow_rsp_threshold < 1)
		cifs_dbg(FYI, "slow_response_threshold msgs disabled\n");
	else if (slow_rsp_threshold > 32767)
		cifs_dbg(VFS,
			 "slow response threshold set higher than recommended (0 to 32767)\n");
#endif /* CONFIG_CIFS_STATS2 */

	atomic_set(&mid_count, 0);
	GlobalCurrentXid = 0;
	GlobalTotalActiveXid = 0;
	GlobalMaxActiveXid = 0;
	spin_lock_init(&cifs_tcp_ses_lock);
	spin_lock_init(&GlobalMid_Lock);

	cifs_lock_secret = get_random_u32();

	if (cifs_max_pending < 2) {
		cifs_max_pending = 2;
		cifs_dbg(FYI, "cifs_max_pending set to min of 2\n");
	} else if (cifs_max_pending > CIFS_MAX_REQ) {
		cifs_max_pending = CIFS_MAX_REQ;
		cifs_dbg(FYI, "cifs_max_pending set to max of %u\n",

	cifsiod_wq = alloc_workqueue("cifsiod", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
		goto out_clean_proc;

	/*
	 * Consider in future setting limit!=0 maybe to min(num_of_cores - 1, 3)
	 * so that we don't launch too many worker threads but
	 * Documentation/core-api/workqueue.rst recommends setting it to 0
	 */

	/* WQ_UNBOUND allows decrypt tasks to run on any CPU */
	decrypt_wq = alloc_workqueue("smb3decryptd",
				     WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
		goto out_destroy_cifsiod_wq;

	fileinfo_put_wq = alloc_workqueue("cifsfileinfoput",
					  WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!fileinfo_put_wq) {
		goto out_destroy_decrypt_wq;

	cifsoplockd_wq = alloc_workqueue("cifsoplockd",
					 WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!cifsoplockd_wq) {
		goto out_destroy_fileinfo_put_wq;

	deferredclose_wq = alloc_workqueue("deferredclose",
					   WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!deferredclose_wq) {
		goto out_destroy_cifsoplockd_wq;

	rc = cifs_init_inodecache();
		goto out_destroy_deferredclose_wq;

		goto out_destroy_inodecache;

	rc = cifs_init_request_bufs();
		goto out_destroy_mids;

#ifdef CONFIG_CIFS_DFS_UPCALL
	rc = dfs_cache_init();
		goto out_destroy_request_bufs;
#endif /* CONFIG_CIFS_DFS_UPCALL */
#ifdef CONFIG_CIFS_UPCALL
	rc = init_cifs_spnego();
		goto out_destroy_dfs_cache;
#endif /* CONFIG_CIFS_UPCALL */
#ifdef CONFIG_CIFS_SWN_UPCALL
	rc = cifs_genl_init();
		goto out_register_key_type;
#endif /* CONFIG_CIFS_SWN_UPCALL */

	rc = init_cifs_idmap();
		goto out_cifs_swn_init;

	rc = register_filesystem(&cifs_fs_type);
		goto out_init_cifs_idmap;

	rc = register_filesystem(&smb3_fs_type);
		unregister_filesystem(&cifs_fs_type);
		goto out_init_cifs_idmap;

out_init_cifs_idmap:
#ifdef CONFIG_CIFS_SWN_UPCALL
out_register_key_type:
#ifdef CONFIG_CIFS_UPCALL
out_destroy_dfs_cache:
#ifdef CONFIG_CIFS_DFS_UPCALL
	dfs_cache_destroy();
out_destroy_request_bufs:
	cifs_destroy_request_bufs();
out_destroy_inodecache:
	cifs_destroy_inodecache();
out_destroy_deferredclose_wq:
	destroy_workqueue(deferredclose_wq);
out_destroy_cifsoplockd_wq:
	destroy_workqueue(cifsoplockd_wq);
out_destroy_fileinfo_put_wq:
	destroy_workqueue(fileinfo_put_wq);
out_destroy_decrypt_wq:
	destroy_workqueue(decrypt_wq);
out_destroy_cifsiod_wq:
	destroy_workqueue(cifsiod_wq);
	cifs_dbg(NOISY, "exit_smb3\n");
	unregister_filesystem(&cifs_fs_type);
	unregister_filesystem(&smb3_fs_type);
	cifs_dfs_release_automount_timer();
#ifdef CONFIG_CIFS_SWN_UPCALL
#ifdef CONFIG_CIFS_UPCALL
#ifdef CONFIG_CIFS_DFS_UPCALL
	dfs_cache_destroy();
	cifs_destroy_request_bufs();
	cifs_destroy_inodecache();
	destroy_workqueue(deferredclose_wq);
	destroy_workqueue(cifsoplockd_wq);
	destroy_workqueue(decrypt_wq);
	destroy_workqueue(fileinfo_put_wq);
	destroy_workqueue(cifsiod_wq);

MODULE_AUTHOR("Steve French");
MODULE_LICENSE("GPL");	/* combination of LGPL + GPL source behaves as GPL */
MODULE_DESCRIPTION
	("VFS to access SMB3 servers e.g. Samba, Macs, Azure and Windows (and "
	 "also older servers complying with the SNIA CIFS Specification)");
MODULE_VERSION(CIFS_VERSION);
MODULE_SOFTDEP("ecb");
MODULE_SOFTDEP("hmac");
MODULE_SOFTDEP("md5");
MODULE_SOFTDEP("nls");
MODULE_SOFTDEP("aes");
MODULE_SOFTDEP("cmac");
MODULE_SOFTDEP("sha256");
MODULE_SOFTDEP("sha512");
MODULE_SOFTDEP("aead2");
MODULE_SOFTDEP("ccm");
MODULE_SOFTDEP("gcm");
module_init(init_cifs)
module_exit(exit_cifs)