1 // SPDX-License-Identifier: LGPL-2.1
4 * Copyright (C) International Business Machines Corp., 2002,2008
5 * Author(s): Steve French (sfrench@us.ibm.com)
7 * Common Internet FileSystem (CIFS) client
11 /* Note that BB means BUGBUG (ie something to fix eventually) */
13 #include <linux/module.h>
15 #include <linux/filelock.h>
16 #include <linux/mount.h>
17 #include <linux/slab.h>
18 #include <linux/init.h>
19 #include <linux/list.h>
20 #include <linux/seq_file.h>
21 #include <linux/vfs.h>
22 #include <linux/mempool.h>
23 #include <linux/delay.h>
24 #include <linux/kthread.h>
25 #include <linux/freezer.h>
26 #include <linux/namei.h>
27 #include <linux/random.h>
28 #include <linux/uuid.h>
29 #include <linux/xattr.h>
30 #include <uapi/linux/magic.h>
34 #define DECLARE_GLOBALS_HERE
36 #include "cifsproto.h"
37 #include "cifs_debug.h"
38 #include "cifs_fs_sb.h"
40 #include <linux/key-type.h>
41 #include "cifs_spnego.h"
43 #ifdef CONFIG_CIFS_DFS_UPCALL
44 #include "dfs_cache.h"
46 #ifdef CONFIG_CIFS_SWN_UPCALL
49 #include "fs_context.h"
50 #include "cached_dir.h"
53 * DOS dates from 1980/1/1 through 2107/12/31
54 * Protocol specifications indicate the range should only go to 119, which
55 * limits the maximum year to 2099. But this range has not been checked.
57 #define SMB_DATE_MAX (127<<9 | 12<<5 | 31)
58 #define SMB_DATE_MIN (0<<9 | 1<<5 | 1)
59 #define SMB_TIME_MAX (23<<11 | 59<<5 | 29)
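/*
 * For illustration, the macros above use the classic FAT/DOS packing that
 * cnvrtDosUnixTm() decodes: a date is ((year - 1980) << 9) | (month << 5) | day
 * and a time is (hours << 11) | (minutes << 5) | (seconds / 2), so SMB_DATE_MAX
 * encodes 2107/12/31 and SMB_TIME_MAX encodes 23:59:58 (two-second resolution).
 * A hypothetical helper, shown only as a sketch of the date layout:
 */
static inline void smb_dos_date_split(__u16 date, int *year, int *month, int *day)
{
	*year  = 1980 + (date >> 9);	/* bits 9-15: years since 1980 */
	*month = (date >> 5) & 0xf;	/* bits 5-8:  month 1-12 */
	*day   = date & 0x1f;		/* bits 0-4:  day 1-31 */
}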
63 bool enable_oplocks = true;
64 bool linuxExtEnabled = true;
65 bool lookupCacheEnabled = true;
66 bool disable_legacy_dialects; /* false by default */
67 bool enable_gcm_256 = true;
68 bool require_gcm_256; /* false by default */
69 bool enable_negotiate_signing; /* false by default */
70 unsigned int global_secflags = CIFSSEC_DEF;
71 /* unsigned int ntlmv2_support = 0; */
72 unsigned int sign_CIFS_PDUs = 1;
75 * Global transaction id (XID) information
77 unsigned int GlobalCurrentXid; /* protected by GlobalMid_Sem */
78 unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Sem */
79 unsigned int GlobalMaxActiveXid; /* prot by GlobalMid_Sem */
80 spinlock_t GlobalMid_Lock; /* protects above & list operations on midQ entries */
83 * Global counters, updated atomically
85 atomic_t sesInfoAllocCount;
86 atomic_t tconInfoAllocCount;
87 atomic_t tcpSesNextId;
88 atomic_t tcpSesAllocCount;
89 atomic_t tcpSesReconnectCount;
90 atomic_t tconInfoReconnectCount;
93 atomic_t buf_alloc_count;
94 atomic_t small_buf_alloc_count;
95 #ifdef CONFIG_CIFS_STATS2
96 atomic_t total_buf_alloc_count;
97 atomic_t total_small_buf_alloc_count;
99 struct list_head cifs_tcp_ses_list;
100 spinlock_t cifs_tcp_ses_lock;
101 static const struct super_operations cifs_super_ops;
102 unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
103 module_param(CIFSMaxBufSize, uint, 0444);
104 MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header) "
105 "for CIFS requests. "
106 "Default: 16384 Range: 8192 to 130048");
107 unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
108 module_param(cifs_min_rcv, uint, 0444);
109 MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
111 unsigned int cifs_min_small = 30;
112 module_param(cifs_min_small, uint, 0444);
113 MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
115 unsigned int cifs_max_pending = CIFS_MAX_REQ;
116 module_param(cifs_max_pending, uint, 0444);
117 MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server for "
118 "CIFS/SMB1 dialect (N/A for SMB3) "
119 "Default: 32767 Range: 2 to 32767.");
120 unsigned int dir_cache_timeout = 30;
121 module_param(dir_cache_timeout, uint, 0644);
122 MODULE_PARM_DESC(dir_cache_timeout, "Number of seconds to cache directory contents for which we have a lease. Default: 30 "
123 "Range: 1 to 65000 seconds, 0 to disable caching dir contents");
124 #ifdef CONFIG_CIFS_STATS2
125 unsigned int slow_rsp_threshold = 1;
126 module_param(slow_rsp_threshold, uint, 0644);
127 MODULE_PARM_DESC(slow_rsp_threshold, "Amount of time (in seconds) to wait "
128 "before logging that a response is delayed. "
129 "Default: 1 (if set to 0 disables msg).");
132 module_param(enable_oplocks, bool, 0644);
133 MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1");
135 module_param(enable_gcm_256, bool, 0644);
136 MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encryption. Default: n/N/0");
138 module_param(require_gcm_256, bool, 0644);
139 MODULE_PARM_DESC(require_gcm_256, "Require strongest (256 bit) GCM encryption. Default: n/N/0");
141 module_param(enable_negotiate_signing, bool, 0644);
142 MODULE_PARM_DESC(enable_negotiate_signing, "Enable negotiating packet signing algorithm with server. Default: n/N/0");
144 module_param(disable_legacy_dialects, bool, 0644);
145 MODULE_PARM_DESC(disable_legacy_dialects, "To improve security it may be "
146 "helpful to restrict the ability to "
147 "override the default dialects (SMB2.1, "
148 "SMB3 and SMB3.02) on mount with old "
149 "dialects (CIFS/SMB1 and SMB2) since "
150 "vers=1.0 (CIFS/SMB1) and vers=2.0 are weaker"
151 " and less secure. Default: n/N/0");
153 extern mempool_t *cifs_sm_req_poolp;
154 extern mempool_t *cifs_req_poolp;
155 extern mempool_t *cifs_mid_poolp;
157 struct workqueue_struct *cifsiod_wq;
158 struct workqueue_struct *decrypt_wq;
159 struct workqueue_struct *fileinfo_put_wq;
160 struct workqueue_struct *cifsoplockd_wq;
161 struct workqueue_struct *deferredclose_wq;
162 __u32 cifs_lock_secret;
165 * Bumps refcount for cifs super block.
166 * Note that it should only be called if a reference to the VFS super block is
167 * already held, e.g. in open-type syscalls context. Otherwise it can race with
168 * atomic_dec_and_test in deactivate_locked_super.
171 cifs_sb_active(struct super_block *sb)
173 struct cifs_sb_info *server = CIFS_SB(sb);
175 if (atomic_inc_return(&server->active) == 1)
176 atomic_inc(&sb->s_active);
180 cifs_sb_deactive(struct super_block *sb)
182 struct cifs_sb_info *server = CIFS_SB(sb);
184 if (atomic_dec_and_test(&server->active))
185 deactivate_super(sb);
189 cifs_read_super(struct super_block *sb)
192 struct cifs_sb_info *cifs_sb;
193 struct cifs_tcon *tcon;
194 struct timespec64 ts;
197 cifs_sb = CIFS_SB(sb);
198 tcon = cifs_sb_master_tcon(cifs_sb);
200 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
201 sb->s_flags |= SB_POSIXACL;
203 if (tcon->snapshot_time)
204 sb->s_flags |= SB_RDONLY;
206 if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)
207 sb->s_maxbytes = MAX_LFS_FILESIZE;
209 sb->s_maxbytes = MAX_NON_LFS;
212 * Some very old servers like DOS and OS/2 used 2 second granularity
213 * (while all current servers use 100ns granularity - see MS-DTYP)
214 * but 1 second is the maximum allowed granularity for the VFS
215 * so for old servers set time granularity to 1 second while for
216 * everything else (current servers) set it to 100ns.
218 if ((tcon->ses->server->vals->protocol_id == SMB10_PROT_ID) &&
219 ((tcon->ses->capabilities &
220 tcon->ses->server->vals->cap_nt_find) == 0) &&
222 sb->s_time_gran = 1000000000; /* 1 second is max allowed gran */
223 ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MIN), 0, 0);
224 sb->s_time_min = ts.tv_sec;
225 ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MAX),
226 cpu_to_le16(SMB_TIME_MAX), 0);
227 sb->s_time_max = ts.tv_sec;
230 * Almost every server, including all SMB2+, uses DCE TIME
231 * ie 100 nanosecond units, since 1601. See MS-DTYP and MS-FSCC
233 sb->s_time_gran = 100;
234 ts = cifs_NTtimeToUnix(0);
235 sb->s_time_min = ts.tv_sec;
236 ts = cifs_NTtimeToUnix(cpu_to_le64(S64_MAX));
237 sb->s_time_max = ts.tv_sec;
240 sb->s_magic = CIFS_SUPER_MAGIC;
241 sb->s_op = &cifs_super_ops;
242 sb->s_xattr = cifs_xattr_handlers;
243 rc = super_setup_bdi(sb);
246 /* tune readahead according to rsize if readahead size not set on mount */
247 if (cifs_sb->ctx->rsize == 0)
248 cifs_sb->ctx->rsize =
249 tcon->ses->server->ops->negotiate_rsize(tcon, cifs_sb->ctx);
250 if (cifs_sb->ctx->rasize)
251 sb->s_bdi->ra_pages = cifs_sb->ctx->rasize / PAGE_SIZE;
253 sb->s_bdi->ra_pages = 2 * (cifs_sb->ctx->rsize / PAGE_SIZE);
255 sb->s_blocksize = CIFS_MAX_MSGSIZE;
256 sb->s_blocksize_bits = 14; /* default 2**14 = CIFS_MAX_MSGSIZE */
257 inode = cifs_root_iget(sb);
265 sb->s_d_op = &cifs_ci_dentry_ops;
267 sb->s_d_op = &cifs_dentry_ops;
269 sb->s_root = d_make_root(inode);
275 #ifdef CONFIG_CIFS_NFSD_EXPORT
276 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
277 cifs_dbg(FYI, "export ops supported\n");
278 sb->s_export_op = &cifs_export_ops;
280 #endif /* CONFIG_CIFS_NFSD_EXPORT */
285 cifs_dbg(VFS, "%s: get root inode failed\n", __func__);
289 static void cifs_kill_sb(struct super_block *sb)
291 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
294 * We need to release all dentries for the cached directories
295 * before we kill the sb.
298 close_all_cached_dirs(cifs_sb);
300 /* finally release root dentry */
302 cifs_sb->root = NULL;
306 cifs_umount(cifs_sb);
310 cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
312 struct super_block *sb = dentry->d_sb;
313 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
314 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
315 struct TCP_Server_Info *server = tcon->ses->server;
321 if (le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength) > 0)
323 le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength);
325 buf->f_namelen = PATH_MAX;
327 buf->f_fsid.val[0] = tcon->vol_serial_number;
328 /* we use part of the create time for more randomness, see man statfs */
329 buf->f_fsid.val[1] = (int)le64_to_cpu(tcon->vol_create_time);
331 buf->f_files = 0; /* undefined */
332 buf->f_ffree = 0; /* unlimited */
334 if (server->ops->queryfs)
335 rc = server->ops->queryfs(xid, tcon, cifs_sb, buf);
341 static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len)
343 struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
344 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
345 struct TCP_Server_Info *server = tcon->ses->server;
347 if (server->ops->fallocate)
348 return server->ops->fallocate(file, tcon, mode, off, len);
353 static int cifs_permission(struct mnt_idmap *idmap,
354 struct inode *inode, int mask)
356 struct cifs_sb_info *cifs_sb;
358 cifs_sb = CIFS_SB(inode->i_sb);
360 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
361 if ((mask & MAY_EXEC) && !execute_ok(inode))
365 } else /* file mode might have been restricted at mount time
366 on the client (above and beyond ACL on servers) for
367 servers which do not support setting and viewing mode bits,
368 so allowing client to check permissions is useful */
369 return generic_permission(&nop_mnt_idmap, inode, mask);
372 static struct kmem_cache *cifs_inode_cachep;
373 static struct kmem_cache *cifs_req_cachep;
374 static struct kmem_cache *cifs_mid_cachep;
375 static struct kmem_cache *cifs_sm_req_cachep;
376 mempool_t *cifs_sm_req_poolp;
377 mempool_t *cifs_req_poolp;
378 mempool_t *cifs_mid_poolp;
380 static struct inode *
381 cifs_alloc_inode(struct super_block *sb)
383 struct cifsInodeInfo *cifs_inode;
384 cifs_inode = alloc_inode_sb(sb, cifs_inode_cachep, GFP_KERNEL);
387 cifs_inode->cifsAttrs = 0x20; /* default */
388 cifs_inode->time = 0;
390 * Until the file is open and we have gotten oplock info back from the
391 * server, we cannot assume caching of file data or metadata.
393 cifs_set_oplock_level(cifs_inode, 0);
394 cifs_inode->flags = 0;
395 spin_lock_init(&cifs_inode->writers_lock);
396 cifs_inode->writers = 0;
397 cifs_inode->netfs.inode.i_blkbits = 14; /* 2**14 = CIFS_MAX_MSGSIZE */
398 cifs_inode->server_eof = 0;
399 cifs_inode->uniqueid = 0;
400 cifs_inode->createtime = 0;
401 cifs_inode->epoch = 0;
402 spin_lock_init(&cifs_inode->open_file_lock);
403 generate_random_uuid(cifs_inode->lease_key);
404 cifs_inode->symlink_target = NULL;
407 * Can not set i_flags here - they get immediately overwritten to zero
410 /* cifs_inode->netfs.inode.i_flags = S_NOATIME | S_NOCMTIME; */
411 INIT_LIST_HEAD(&cifs_inode->openFileList);
412 INIT_LIST_HEAD(&cifs_inode->llist);
413 INIT_LIST_HEAD(&cifs_inode->deferred_closes);
414 spin_lock_init(&cifs_inode->deferred_lock);
415 return &cifs_inode->netfs.inode;
419 cifs_free_inode(struct inode *inode)
421 struct cifsInodeInfo *cinode = CIFS_I(inode);
423 if (S_ISLNK(inode->i_mode))
424 kfree(cinode->symlink_target);
425 kmem_cache_free(cifs_inode_cachep, cinode);
429 cifs_evict_inode(struct inode *inode)
431 truncate_inode_pages_final(&inode->i_data);
432 if (inode->i_state & I_PINNING_FSCACHE_WB)
433 cifs_fscache_unuse_inode_cookie(inode, true);
434 cifs_fscache_release_inode_cookie(inode);
439 cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)
441 struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
442 struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr;
444 seq_puts(s, ",addr=");
446 switch (server->dstaddr.ss_family) {
448 seq_printf(s, "%pI4", &sa->sin_addr.s_addr);
451 seq_printf(s, "%pI6", &sa6->sin6_addr.s6_addr);
452 if (sa6->sin6_scope_id)
453 seq_printf(s, "%%%u", sa6->sin6_scope_id);
456 seq_puts(s, "(unknown)");
459 seq_puts(s, ",rdma");
463 cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
465 if (ses->sectype == Unspecified) {
466 if (ses->user_name == NULL)
467 seq_puts(s, ",sec=none");
471 seq_puts(s, ",sec=");
473 switch (ses->sectype) {
475 seq_puts(s, "ntlmv2");
481 seq_puts(s, "ntlmssp");
484 /* shouldn't ever happen */
485 seq_puts(s, "unknown");
492 if (ses->sectype == Kerberos)
493 seq_printf(s, ",cruid=%u",
494 from_kuid_munged(&init_user_ns, ses->cred_uid));
498 cifs_show_cache_flavor(struct seq_file *s, struct cifs_sb_info *cifs_sb)
500 seq_puts(s, ",cache=");
502 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
503 seq_puts(s, "strict");
504 else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
506 else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RW_CACHE)
507 seq_puts(s, "singleclient"); /* assume only one client access */
508 else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE)
509 seq_puts(s, "ro"); /* read only caching assumed */
511 seq_puts(s, "loose");
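/*
 * For reference, the flavors printed above mirror the cache= mount option
 * parsed in fs_context.c: cache=strict, cache=none (direct I/O),
 * cache=singleclient, cache=ro and the default, cache=loose.
 */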
515 * cifs_show_devname() is used so we show the mount device name with correct
516 * format (e.g. forward slashes vs. back slashes) in /proc/mounts
518 static int cifs_show_devname(struct seq_file *m, struct dentry *root)
520 struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
521 char *devname = kstrdup(cifs_sb->ctx->source, GFP_KERNEL);
526 convert_delimiter(devname, '/');
527 /* escape all spaces in share names */
528 seq_escape(m, devname, " \t");
535 * cifs_show_options() is for displaying mount options in /proc/mounts.
536 * Not all settable options are displayed but most of the important
540 cifs_show_options(struct seq_file *s, struct dentry *root)
542 struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
543 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
544 struct sockaddr *srcaddr;
545 srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;
547 seq_show_option(s, "vers", tcon->ses->server->vals->version_string);
548 cifs_show_security(s, tcon->ses);
549 cifs_show_cache_flavor(s, cifs_sb);
552 seq_puts(s, ",nolease");
553 if (cifs_sb->ctx->multiuser)
554 seq_puts(s, ",multiuser");
555 else if (tcon->ses->user_name)
556 seq_show_option(s, "username", tcon->ses->user_name);
558 if (tcon->ses->domainName && tcon->ses->domainName[0] != 0)
559 seq_show_option(s, "domain", tcon->ses->domainName);
561 if (srcaddr->sa_family != AF_UNSPEC) {
562 struct sockaddr_in *saddr4;
563 struct sockaddr_in6 *saddr6;
564 saddr4 = (struct sockaddr_in *)srcaddr;
565 saddr6 = (struct sockaddr_in6 *)srcaddr;
566 if (srcaddr->sa_family == AF_INET6)
567 seq_printf(s, ",srcaddr=%pI6c",
569 else if (srcaddr->sa_family == AF_INET)
570 seq_printf(s, ",srcaddr=%pI4",
571 &saddr4->sin_addr.s_addr);
573 seq_printf(s, ",srcaddr=BAD-AF:%i",
574 (int)(srcaddr->sa_family));
577 seq_printf(s, ",uid=%u",
578 from_kuid_munged(&init_user_ns, cifs_sb->ctx->linux_uid));
579 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
580 seq_puts(s, ",forceuid");
582 seq_puts(s, ",noforceuid");
584 seq_printf(s, ",gid=%u",
585 from_kgid_munged(&init_user_ns, cifs_sb->ctx->linux_gid));
586 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
587 seq_puts(s, ",forcegid");
589 seq_puts(s, ",noforcegid");
591 cifs_show_address(s, tcon->ses->server);
594 seq_printf(s, ",file_mode=0%ho,dir_mode=0%ho",
595 cifs_sb->ctx->file_mode,
596 cifs_sb->ctx->dir_mode);
597 if (cifs_sb->ctx->iocharset)
598 seq_printf(s, ",iocharset=%s", cifs_sb->ctx->iocharset);
600 seq_puts(s, ",seal");
601 else if (tcon->ses->server->ignore_signature)
602 seq_puts(s, ",signloosely");
604 seq_puts(s, ",nocase");
606 seq_puts(s, ",nodelete");
607 if (cifs_sb->ctx->no_sparse)
608 seq_puts(s, ",nosparse");
609 if (tcon->local_lease)
610 seq_puts(s, ",locallease");
612 seq_puts(s, ",hard");
614 seq_puts(s, ",soft");
615 if (tcon->use_persistent)
616 seq_puts(s, ",persistenthandles");
617 else if (tcon->use_resilient)
618 seq_puts(s, ",resilienthandles");
619 if (tcon->posix_extensions)
620 seq_puts(s, ",posix");
621 else if (tcon->unix_ext)
622 seq_puts(s, ",unix");
624 seq_puts(s, ",nounix");
625 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS)
626 seq_puts(s, ",nodfs");
627 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
628 seq_puts(s, ",posixpaths");
629 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
630 seq_puts(s, ",setuids");
631 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
632 seq_puts(s, ",idsfromsid");
633 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
634 seq_puts(s, ",serverino");
635 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
636 seq_puts(s, ",rwpidforward");
637 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL)
638 seq_puts(s, ",forcemand");
639 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
640 seq_puts(s, ",nouser_xattr");
641 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
642 seq_puts(s, ",mapchars");
643 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR)
644 seq_puts(s, ",mapposix");
645 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
647 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
648 seq_puts(s, ",nobrl");
649 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_HANDLE_CACHE)
650 seq_puts(s, ",nohandlecache");
651 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID)
652 seq_puts(s, ",modefromsid");
653 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
654 seq_puts(s, ",cifsacl");
655 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
656 seq_puts(s, ",dynperm");
657 if (root->d_sb->s_flags & SB_POSIXACL)
659 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
660 seq_puts(s, ",mfsymlinks");
661 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
663 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)
664 seq_puts(s, ",nostrictsync");
665 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
666 seq_puts(s, ",noperm");
667 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID)
668 seq_printf(s, ",backupuid=%u",
669 from_kuid_munged(&init_user_ns,
670 cifs_sb->ctx->backupuid));
671 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID)
672 seq_printf(s, ",backupgid=%u",
673 from_kgid_munged(&init_user_ns,
674 cifs_sb->ctx->backupgid));
676 seq_printf(s, ",rsize=%u", cifs_sb->ctx->rsize);
677 seq_printf(s, ",wsize=%u", cifs_sb->ctx->wsize);
678 seq_printf(s, ",bsize=%u", cifs_sb->ctx->bsize);
679 if (cifs_sb->ctx->rasize)
680 seq_printf(s, ",rasize=%u", cifs_sb->ctx->rasize);
681 if (tcon->ses->server->min_offload)
682 seq_printf(s, ",esize=%u", tcon->ses->server->min_offload);
683 seq_printf(s, ",echo_interval=%lu",
684 tcon->ses->server->echo_interval / HZ);
686 /* Only display the following if overridden on mount */
687 if (tcon->ses->server->max_credits != SMB2_MAX_CREDITS_AVAILABLE)
688 seq_printf(s, ",max_credits=%u", tcon->ses->server->max_credits);
689 if (tcon->ses->server->tcp_nodelay)
690 seq_puts(s, ",tcpnodelay");
691 if (tcon->ses->server->noautotune)
692 seq_puts(s, ",noautotune");
693 if (tcon->ses->server->noblocksnd)
694 seq_puts(s, ",noblocksend");
695 if (tcon->ses->server->nosharesock)
696 seq_puts(s, ",nosharesock");
698 if (tcon->snapshot_time)
699 seq_printf(s, ",snapshot=%llu", tcon->snapshot_time);
700 if (tcon->handle_timeout)
701 seq_printf(s, ",handletimeout=%u", tcon->handle_timeout);
702 if (tcon->max_cached_dirs != MAX_CACHED_FIDS)
703 seq_printf(s, ",max_cached_dirs=%u", tcon->max_cached_dirs);
706 * Display the file and directory attribute timeouts in seconds.
707 * If the file and directory attribute timeouts are the same, then actimeo
708 * was likely specified on mount
710 if (cifs_sb->ctx->acdirmax == cifs_sb->ctx->acregmax)
711 seq_printf(s, ",actimeo=%lu", cifs_sb->ctx->acregmax / HZ);
713 seq_printf(s, ",acdirmax=%lu", cifs_sb->ctx->acdirmax / HZ);
714 seq_printf(s, ",acregmax=%lu", cifs_sb->ctx->acregmax / HZ);
716 seq_printf(s, ",closetimeo=%lu", cifs_sb->ctx->closetimeo / HZ);
718 if (tcon->ses->chan_max > 1)
719 seq_printf(s, ",multichannel,max_channels=%zu",
720 tcon->ses->chan_max);
722 if (tcon->use_witness)
723 seq_puts(s, ",witness");
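/*
 * For illustration, with a hypothetical server, share and default settings a
 * resulting /proc/mounts entry might look roughly like:
 *
 *   //srv/share /mnt cifs rw,vers=3.1.1,sec=ntlmssp,cache=strict,username=user,
 *   uid=0,noforceuid,gid=0,noforcegid,addr=192.0.2.1,file_mode=0755,
 *   dir_mode=0755,soft,serverino,rsize=4194304,wsize=4194304,bsize=1048576,
 *   echo_interval=60,actimeo=1 0 0
 */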
728 static void cifs_umount_begin(struct super_block *sb)
730 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
731 struct cifs_tcon *tcon;
736 tcon = cifs_sb_master_tcon(cifs_sb);
738 spin_lock(&cifs_tcp_ses_lock);
739 spin_lock(&tcon->tc_lock);
740 if ((tcon->tc_count > 1) || (tcon->status == TID_EXITING)) {
741 /* we have other mounts to same share or we have
742 already tried to umount this and woken up
743 all waiting network requests, nothing to do */
744 spin_unlock(&tcon->tc_lock);
745 spin_unlock(&cifs_tcp_ses_lock);
749 * cannot set tcon->status to TID_EXITING yet since we don't know if umount -f will
750 * fail later (e.g. due to open files). TID_EXITING will be set just before the tdis req is sent
752 spin_unlock(&tcon->tc_lock);
753 spin_unlock(&cifs_tcp_ses_lock);
755 cifs_close_all_deferred_files(tcon);
756 /* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
757 /* cancel_notify_requests(tcon); */
758 if (tcon->ses && tcon->ses->server) {
759 cifs_dbg(FYI, "wake up tasks now - umount begin not complete\n");
760 wake_up_all(&tcon->ses->server->request_q);
761 wake_up_all(&tcon->ses->server->response_q);
762 msleep(1); /* yield */
763 /* we have to kick the requests once more */
764 wake_up_all(&tcon->ses->server->response_q);
771 static int cifs_freeze(struct super_block *sb)
773 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
774 struct cifs_tcon *tcon;
779 tcon = cifs_sb_master_tcon(cifs_sb);
781 cifs_close_all_deferred_files(tcon);
785 #ifdef CONFIG_CIFS_STATS2
786 static int cifs_show_stats(struct seq_file *s, struct dentry *root)
793 static int cifs_write_inode(struct inode *inode, struct writeback_control *wbc)
795 fscache_unpin_writeback(wbc, cifs_inode_cookie(inode));
799 static int cifs_drop_inode(struct inode *inode)
801 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
803 /* no serverino => unconditional eviction */
804 return !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) ||
805 generic_drop_inode(inode);
808 static const struct super_operations cifs_super_ops = {
809 .statfs = cifs_statfs,
810 .alloc_inode = cifs_alloc_inode,
811 .write_inode = cifs_write_inode,
812 .free_inode = cifs_free_inode,
813 .drop_inode = cifs_drop_inode,
814 .evict_inode = cifs_evict_inode,
815 /* .show_path = cifs_show_path, */ /* Would we ever need show path? */
816 .show_devname = cifs_show_devname,
817 /* .delete_inode = cifs_delete_inode, */ /* Do not need above
818 function unless later we add lazy close of inodes or unless the
819 kernel forgets to call us with the same number of releases (closes)
821 .show_options = cifs_show_options,
822 .umount_begin = cifs_umount_begin,
823 .freeze_fs = cifs_freeze,
824 #ifdef CONFIG_CIFS_STATS2
825 .show_stats = cifs_show_stats,
830 * Get root dentry from superblock according to prefix path mount option.
831 * Return dentry with refcount + 1 on success and NULL otherwise.
833 static struct dentry *
834 cifs_get_root(struct smb3_fs_context *ctx, struct super_block *sb)
836 struct dentry *dentry;
837 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
838 char *full_path = NULL;
842 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
843 return dget(sb->s_root);
845 full_path = cifs_build_path_to_root(ctx, cifs_sb,
846 cifs_sb_master_tcon(cifs_sb), 0);
847 if (full_path == NULL)
848 return ERR_PTR(-ENOMEM);
850 cifs_dbg(FYI, "Get root dentry for %s\n", full_path);
852 sep = CIFS_DIR_SEP(cifs_sb);
853 dentry = dget(sb->s_root);
857 struct inode *dir = d_inode(dentry);
858 struct dentry *child;
860 if (!S_ISDIR(dir->i_mode)) {
862 dentry = ERR_PTR(-ENOTDIR);
866 /* skip separators */
873 while (*s && *s != sep)
876 child = lookup_positive_unlocked(p, dentry, s - p);
879 } while (!IS_ERR(dentry));
884 static int cifs_set_super(struct super_block *sb, void *data)
886 struct cifs_mnt_data *mnt_data = data;
887 sb->s_fs_info = mnt_data->cifs_sb;
888 return set_anon_super(sb, NULL);
892 cifs_smb3_do_mount(struct file_system_type *fs_type,
893 int flags, struct smb3_fs_context *old_ctx)
895 struct cifs_mnt_data mnt_data;
896 struct cifs_sb_info *cifs_sb;
897 struct super_block *sb;
902 cifs_dbg(FYI, "%s: devname=%s flags=0x%x\n", __func__,
903 old_ctx->source, flags);
905 cifs_info("Attempting to mount %s\n", old_ctx->source);
908 cifs_sb = kzalloc(sizeof(*cifs_sb), GFP_KERNEL);
910 return ERR_PTR(-ENOMEM);
912 cifs_sb->ctx = kzalloc(sizeof(struct smb3_fs_context), GFP_KERNEL);
914 root = ERR_PTR(-ENOMEM);
917 rc = smb3_fs_context_dup(cifs_sb->ctx, old_ctx);
923 rc = cifs_setup_cifs_sb(cifs_sb);
929 rc = cifs_mount(cifs_sb, cifs_sb->ctx);
931 if (!(flags & SB_SILENT))
932 cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",
938 mnt_data.ctx = cifs_sb->ctx;
939 mnt_data.cifs_sb = cifs_sb;
940 mnt_data.flags = flags;
942 /* BB should we make this contingent on mount parm? */
943 flags |= SB_NODIRATIME | SB_NOATIME;
945 sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data);
947 cifs_umount(cifs_sb);
952 cifs_dbg(FYI, "Use existing superblock\n");
953 cifs_umount(cifs_sb);
956 rc = cifs_read_super(sb);
962 sb->s_flags |= SB_ACTIVE;
965 root = cifs_get_root(cifs_sb ? cifs_sb->ctx : old_ctx, sb);
970 cifs_sb->root = dget(root);
972 cifs_dbg(FYI, "dentry root is: %p\n", root);
976 deactivate_locked_super(sb);
979 kfree(cifs_sb->prepath);
980 smb3_cleanup_fs_context(cifs_sb->ctx);
987 cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
990 struct inode *inode = file_inode(iocb->ki_filp);
992 if (iocb->ki_flags & IOCB_DIRECT)
993 return cifs_user_readv(iocb, iter);
995 rc = cifs_revalidate_mapping(inode);
999 return generic_file_read_iter(iocb, iter);
1002 static ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
1004 struct inode *inode = file_inode(iocb->ki_filp);
1005 struct cifsInodeInfo *cinode = CIFS_I(inode);
1009 if (iocb->ki_filp->f_flags & O_DIRECT) {
1010 written = cifs_user_writev(iocb, from);
1011 if (written > 0 && CIFS_CACHE_READ(cinode)) {
1012 cifs_zap_mapping(inode);
1014 "Set no oplock for inode=%p after a write operation\n",
1021 written = cifs_get_writer(cinode);
1025 written = generic_file_write_iter(iocb, from);
1027 if (CIFS_CACHE_WRITE(CIFS_I(inode)))
1030 rc = filemap_fdatawrite(inode->i_mapping);
1032 cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
1036 cifs_put_writer(cinode);
1040 static loff_t cifs_llseek(struct file *file, loff_t offset, int whence)
1042 struct cifsFileInfo *cfile = file->private_data;
1043 struct cifs_tcon *tcon;
1046 * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
1047 * the cached file length
1049 if (whence != SEEK_SET && whence != SEEK_CUR) {
1051 struct inode *inode = file_inode(file);
1054 * We need to be sure that all dirty pages are written and the
1055 * server has the newest file length.
1057 if (!CIFS_CACHE_READ(CIFS_I(inode)) && inode->i_mapping &&
1058 inode->i_mapping->nrpages != 0) {
1059 rc = filemap_fdatawait(inode->i_mapping);
1061 mapping_set_error(inode->i_mapping, rc);
1066 * Some applications poll for the file length in this strange
1067 * way so we must seek to end on non-oplocked files by
1068 * setting the revalidate time to zero.
1070 CIFS_I(inode)->time = 0;
1072 rc = cifs_revalidate_file_attr(file);
1076 if (cfile && cfile->tlink) {
1077 tcon = tlink_tcon(cfile->tlink);
1078 if (tcon->ses->server->ops->llseek)
1079 return tcon->ses->server->ops->llseek(file, tcon,
1082 return generic_file_llseek(file, offset, whence);
1086 cifs_setlease(struct file *file, int arg, struct file_lock **lease, void **priv)
1089 * Note that this is called by vfs setlease with i_lock held to
1090 * protect *lease from going away.
1092 struct inode *inode = file_inode(file);
1093 struct cifsFileInfo *cfile = file->private_data;
1095 if (!(S_ISREG(inode->i_mode)))
1098 /* Check if file is oplocked if this is request for new lease */
1099 if (arg == F_UNLCK ||
1100 ((arg == F_RDLCK) && CIFS_CACHE_READ(CIFS_I(inode))) ||
1101 ((arg == F_WRLCK) && CIFS_CACHE_WRITE(CIFS_I(inode))))
1102 return generic_setlease(file, arg, lease, priv);
1103 else if (tlink_tcon(cfile->tlink)->local_lease &&
1104 !CIFS_CACHE_READ(CIFS_I(inode)))
1106 * If the server claims to support oplock on this file, then we
1107 * still need to check oplock even if the local_lease mount
1108 * option is set, but there are servers which do not support
1109 * oplock for which this mount option may be useful if the user
1110 * knows that the file won't be changed on the server by anyone
1113 return generic_setlease(file, arg, lease, priv);
1118 struct file_system_type cifs_fs_type = {
1119 .owner = THIS_MODULE,
1121 .init_fs_context = smb3_init_fs_context,
1122 .parameters = smb3_fs_parameters,
1123 .kill_sb = cifs_kill_sb,
1124 .fs_flags = FS_RENAME_DOES_D_MOVE,
1126 MODULE_ALIAS_FS("cifs");
1128 struct file_system_type smb3_fs_type = {
1129 .owner = THIS_MODULE,
1131 .init_fs_context = smb3_init_fs_context,
1132 .parameters = smb3_fs_parameters,
1133 .kill_sb = cifs_kill_sb,
1134 .fs_flags = FS_RENAME_DOES_D_MOVE,
1136 MODULE_ALIAS_FS("smb3");
1137 MODULE_ALIAS("smb3");
1139 const struct inode_operations cifs_dir_inode_ops = {
1140 .create = cifs_create,
1141 .atomic_open = cifs_atomic_open,
1142 .lookup = cifs_lookup,
1143 .getattr = cifs_getattr,
1144 .unlink = cifs_unlink,
1145 .link = cifs_hardlink,
1146 .mkdir = cifs_mkdir,
1147 .rmdir = cifs_rmdir,
1148 .rename = cifs_rename2,
1149 .permission = cifs_permission,
1150 .setattr = cifs_setattr,
1151 .symlink = cifs_symlink,
1152 .mknod = cifs_mknod,
1153 .listxattr = cifs_listxattr,
1154 .get_acl = cifs_get_acl,
1155 .set_acl = cifs_set_acl,
1158 const struct inode_operations cifs_file_inode_ops = {
1159 .setattr = cifs_setattr,
1160 .getattr = cifs_getattr,
1161 .permission = cifs_permission,
1162 .listxattr = cifs_listxattr,
1163 .fiemap = cifs_fiemap,
1164 .get_acl = cifs_get_acl,
1165 .set_acl = cifs_set_acl,
1168 const char *cifs_get_link(struct dentry *dentry, struct inode *inode,
1169 struct delayed_call *done)
1173 target_path = kmalloc(PATH_MAX, GFP_KERNEL);
1175 return ERR_PTR(-ENOMEM);
1177 spin_lock(&inode->i_lock);
1178 if (likely(CIFS_I(inode)->symlink_target)) {
1179 strscpy(target_path, CIFS_I(inode)->symlink_target, PATH_MAX);
1182 target_path = ERR_PTR(-EOPNOTSUPP);
1184 spin_unlock(&inode->i_lock);
1186 if (!IS_ERR(target_path))
1187 set_delayed_call(done, kfree_link, target_path);
1192 const struct inode_operations cifs_symlink_inode_ops = {
1193 .get_link = cifs_get_link,
1194 .permission = cifs_permission,
1195 .listxattr = cifs_listxattr,
1198 static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
1199 struct file *dst_file, loff_t destoff, loff_t len,
1200 unsigned int remap_flags)
1202 struct inode *src_inode = file_inode(src_file);
1203 struct inode *target_inode = file_inode(dst_file);
1204 struct cifsFileInfo *smb_file_src = src_file->private_data;
1205 struct cifsFileInfo *smb_file_target;
1206 struct cifs_tcon *target_tcon;
1210 if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
1213 cifs_dbg(FYI, "clone range\n");
1217 if (!src_file->private_data || !dst_file->private_data) {
1219 cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
1223 smb_file_target = dst_file->private_data;
1224 target_tcon = tlink_tcon(smb_file_target->tlink);
1227 * Note: the cifs case is easier than btrfs since the server is responsible
1228 * for checking proper open modes and file type, and if it wants, the
1229 * server could even support copying a range where source == target
1231 lock_two_nondirectories(target_inode, src_inode);
1234 len = src_inode->i_size - off;
1236 cifs_dbg(FYI, "about to flush pages\n");
1237 /* should we flush the first and last pages first? */
1238 truncate_inode_pages_range(&target_inode->i_data, destoff,
1239 PAGE_ALIGN(destoff + len)-1);
1241 if (target_tcon->ses->server->ops->duplicate_extents)
1242 rc = target_tcon->ses->server->ops->duplicate_extents(xid,
1243 smb_file_src, smb_file_target, off, len, destoff);
1247 /* force revalidate of size and timestamps of target file now
1248 that target is updated on the server */
1249 CIFS_I(target_inode)->time = 0;
1250 /* although unlocking in the reverse order from locking is not
1251 strictly necessary here it is a little cleaner to be consistent */
1252 unlock_two_nondirectories(src_inode, target_inode);
1255 return rc < 0 ? rc : len;
1258 ssize_t cifs_file_copychunk_range(unsigned int xid,
1259 struct file *src_file, loff_t off,
1260 struct file *dst_file, loff_t destoff,
1261 size_t len, unsigned int flags)
1263 struct inode *src_inode = file_inode(src_file);
1264 struct inode *target_inode = file_inode(dst_file);
1265 struct cifsFileInfo *smb_file_src;
1266 struct cifsFileInfo *smb_file_target;
1267 struct cifs_tcon *src_tcon;
1268 struct cifs_tcon *target_tcon;
1271 cifs_dbg(FYI, "copychunk range\n");
1273 if (!src_file->private_data || !dst_file->private_data) {
1275 cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
1280 smb_file_target = dst_file->private_data;
1281 smb_file_src = src_file->private_data;
1282 src_tcon = tlink_tcon(smb_file_src->tlink);
1283 target_tcon = tlink_tcon(smb_file_target->tlink);
1285 if (src_tcon->ses != target_tcon->ses) {
1286 cifs_dbg(VFS, "source and target of copy not on same server\n");
1291 if (!target_tcon->ses->server->ops->copychunk_range)
1295 * Note: the cifs case is easier than btrfs since the server is responsible
1296 * for checking proper open modes and file type, and if it wants, the
1297 * server could even support copying a range where source == target
1299 lock_two_nondirectories(target_inode, src_inode);
1301 cifs_dbg(FYI, "about to flush pages\n");
1303 rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
1308 /* should we flush the first and last pages first? */
1309 truncate_inode_pages(&target_inode->i_data, 0);
1311 rc = file_modified(dst_file);
1313 rc = target_tcon->ses->server->ops->copychunk_range(xid,
1314 smb_file_src, smb_file_target, off, len, destoff);
1316 file_accessed(src_file);
1318 /* force revalidate of size and timestamps of target file now
1319 * that target is updated on the server
1321 CIFS_I(target_inode)->time = 0;
1324 /* although unlocking in the reverse order from locking is not
1325 * strictly necessary here it is a little cleaner to be consistent
1327 unlock_two_nondirectories(src_inode, target_inode);
1334 * Directory operations under CIFS/SMB2/SMB3 are synchronous, so fsync()
1335 * is a dummy operation.
1337 static int cifs_dir_fsync(struct file *file, loff_t start, loff_t end, int datasync)
1339 cifs_dbg(FYI, "Sync directory - name: %pD datasync: 0x%x\n",
1345 static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off,
1346 struct file *dst_file, loff_t destoff,
1347 size_t len, unsigned int flags)
1349 unsigned int xid = get_xid();
1351 struct cifsFileInfo *cfile = dst_file->private_data;
1353 if (cfile->swapfile) {
1359 rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff,
1363 if (rc == -EOPNOTSUPP || rc == -EXDEV)
1364 rc = generic_copy_file_range(src_file, off, dst_file,
1365 destoff, len, flags);
1369 const struct file_operations cifs_file_ops = {
1370 .read_iter = cifs_loose_read_iter,
1371 .write_iter = cifs_file_write_iter,
1373 .release = cifs_close,
1375 .flock = cifs_flock,
1376 .fsync = cifs_fsync,
1377 .flush = cifs_flush,
1378 .mmap = cifs_file_mmap,
1379 .splice_read = filemap_splice_read,
1380 .splice_write = iter_file_splice_write,
1381 .llseek = cifs_llseek,
1382 .unlocked_ioctl = cifs_ioctl,
1383 .copy_file_range = cifs_copy_file_range,
1384 .remap_file_range = cifs_remap_file_range,
1385 .setlease = cifs_setlease,
1386 .fallocate = cifs_fallocate,
1389 const struct file_operations cifs_file_strict_ops = {
1390 .read_iter = cifs_strict_readv,
1391 .write_iter = cifs_strict_writev,
1393 .release = cifs_close,
1395 .flock = cifs_flock,
1396 .fsync = cifs_strict_fsync,
1397 .flush = cifs_flush,
1398 .mmap = cifs_file_strict_mmap,
1399 .splice_read = filemap_splice_read,
1400 .splice_write = iter_file_splice_write,
1401 .llseek = cifs_llseek,
1402 .unlocked_ioctl = cifs_ioctl,
1403 .copy_file_range = cifs_copy_file_range,
1404 .remap_file_range = cifs_remap_file_range,
1405 .setlease = cifs_setlease,
1406 .fallocate = cifs_fallocate,
1409 const struct file_operations cifs_file_direct_ops = {
1410 .read_iter = cifs_direct_readv,
1411 .write_iter = cifs_direct_writev,
1413 .release = cifs_close,
1415 .flock = cifs_flock,
1416 .fsync = cifs_fsync,
1417 .flush = cifs_flush,
1418 .mmap = cifs_file_mmap,
1419 .splice_read = copy_splice_read,
1420 .splice_write = iter_file_splice_write,
1421 .unlocked_ioctl = cifs_ioctl,
1422 .copy_file_range = cifs_copy_file_range,
1423 .remap_file_range = cifs_remap_file_range,
1424 .llseek = cifs_llseek,
1425 .setlease = cifs_setlease,
1426 .fallocate = cifs_fallocate,
1429 const struct file_operations cifs_file_nobrl_ops = {
1430 .read_iter = cifs_loose_read_iter,
1431 .write_iter = cifs_file_write_iter,
1433 .release = cifs_close,
1434 .fsync = cifs_fsync,
1435 .flush = cifs_flush,
1436 .mmap = cifs_file_mmap,
1437 .splice_read = filemap_splice_read,
1438 .splice_write = iter_file_splice_write,
1439 .llseek = cifs_llseek,
1440 .unlocked_ioctl = cifs_ioctl,
1441 .copy_file_range = cifs_copy_file_range,
1442 .remap_file_range = cifs_remap_file_range,
1443 .setlease = cifs_setlease,
1444 .fallocate = cifs_fallocate,
1447 const struct file_operations cifs_file_strict_nobrl_ops = {
1448 .read_iter = cifs_strict_readv,
1449 .write_iter = cifs_strict_writev,
1451 .release = cifs_close,
1452 .fsync = cifs_strict_fsync,
1453 .flush = cifs_flush,
1454 .mmap = cifs_file_strict_mmap,
1455 .splice_read = filemap_splice_read,
1456 .splice_write = iter_file_splice_write,
1457 .llseek = cifs_llseek,
1458 .unlocked_ioctl = cifs_ioctl,
1459 .copy_file_range = cifs_copy_file_range,
1460 .remap_file_range = cifs_remap_file_range,
1461 .setlease = cifs_setlease,
1462 .fallocate = cifs_fallocate,
1465 const struct file_operations cifs_file_direct_nobrl_ops = {
1466 .read_iter = cifs_direct_readv,
1467 .write_iter = cifs_direct_writev,
1469 .release = cifs_close,
1470 .fsync = cifs_fsync,
1471 .flush = cifs_flush,
1472 .mmap = cifs_file_mmap,
1473 .splice_read = copy_splice_read,
1474 .splice_write = iter_file_splice_write,
1475 .unlocked_ioctl = cifs_ioctl,
1476 .copy_file_range = cifs_copy_file_range,
1477 .remap_file_range = cifs_remap_file_range,
1478 .llseek = cifs_llseek,
1479 .setlease = cifs_setlease,
1480 .fallocate = cifs_fallocate,
1483 const struct file_operations cifs_dir_ops = {
1484 .iterate_shared = cifs_readdir,
1485 .release = cifs_closedir,
1486 .read = generic_read_dir,
1487 .unlocked_ioctl = cifs_ioctl,
1488 .copy_file_range = cifs_copy_file_range,
1489 .remap_file_range = cifs_remap_file_range,
1490 .llseek = generic_file_llseek,
1491 .fsync = cifs_dir_fsync,
1495 cifs_init_once(void *inode)
1497 struct cifsInodeInfo *cifsi = inode;
1499 inode_init_once(&cifsi->netfs.inode);
1500 init_rwsem(&cifsi->lock_sem);
1504 cifs_init_inodecache(void)
1506 cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
1507 sizeof(struct cifsInodeInfo),
1508 0, (SLAB_RECLAIM_ACCOUNT|
1509 SLAB_MEM_SPREAD|SLAB_ACCOUNT),
1511 if (cifs_inode_cachep == NULL)
1518 cifs_destroy_inodecache(void)
1521 * Make sure all delayed rcu free inodes are flushed before we
1525 kmem_cache_destroy(cifs_inode_cachep);
1529 cifs_init_request_bufs(void)
1532 * The SMB2 maximum header size is bigger than the CIFS one, so it is no
1533 * problem to allocate a few more bytes for CIFS.
1535 size_t max_hdr_size = MAX_SMB2_HDR_SIZE;
1537 if (CIFSMaxBufSize < 8192) {
1538 /* Buffer size cannot be smaller than 2 * PATH_MAX since the maximum
1539 Unicode path name has to fit in any SMB/CIFS path-based frame */
1540 CIFSMaxBufSize = 8192;
1541 } else if (CIFSMaxBufSize > 1024*127) {
1542 CIFSMaxBufSize = 1024 * 127;
1544 CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
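/*
 * For illustration: 0x1FE00 keeps bits 9-16 only, so the assignment above
 * rounds down to a multiple of 512 (e.g. a requested 16000 becomes 15872,
 * i.e. 31 * 512); together with the clamps above the final value always
 * lies in [8192, 130048].
 */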
1547 cifs_dbg(VFS, "CIFSMaxBufSize %d 0x%x\n",
1548 CIFSMaxBufSize, CIFSMaxBufSize);
1550 cifs_req_cachep = kmem_cache_create_usercopy("cifs_request",
1551 CIFSMaxBufSize + max_hdr_size, 0,
1552 SLAB_HWCACHE_ALIGN, 0,
1553 CIFSMaxBufSize + max_hdr_size,
1555 if (cifs_req_cachep == NULL)
1558 if (cifs_min_rcv < 1)
1560 else if (cifs_min_rcv > 64) {
1562 cifs_dbg(VFS, "cifs_min_rcv set to maximum (64)\n");
1565 cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
1568 if (cifs_req_poolp == NULL) {
1569 kmem_cache_destroy(cifs_req_cachep);
1572 /* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
1573 almost all handle-based requests (but not write responses, nor is it
1574 sufficient for path-based requests). A smaller size would have
1575 been more efficient (compacting multiple slab items on one 4k page)
1576 for the case in which debug was on, but this larger size allows
1577 more SMBs to use the small buffer alloc and is still much more
1578 efficient to allocate one per page off the slab compared to the 17K
1579 (5-page) alloc of large cifs buffers, even when page debugging is on */
1580 cifs_sm_req_cachep = kmem_cache_create_usercopy("cifs_small_rq",
1581 MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
1582 0, MAX_CIFS_SMALL_BUFFER_SIZE, NULL);
1583 if (cifs_sm_req_cachep == NULL) {
1584 mempool_destroy(cifs_req_poolp);
1585 kmem_cache_destroy(cifs_req_cachep);
1589 if (cifs_min_small < 2)
1591 else if (cifs_min_small > 256) {
1592 cifs_min_small = 256;
1593 cifs_dbg(FYI, "cifs_min_small set to maximum (256)\n");
1596 cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
1597 cifs_sm_req_cachep);
1599 if (cifs_sm_req_poolp == NULL) {
1600 mempool_destroy(cifs_req_poolp);
1601 kmem_cache_destroy(cifs_req_cachep);
1602 kmem_cache_destroy(cifs_sm_req_cachep);
1610 cifs_destroy_request_bufs(void)
1612 mempool_destroy(cifs_req_poolp);
1613 kmem_cache_destroy(cifs_req_cachep);
1614 mempool_destroy(cifs_sm_req_poolp);
1615 kmem_cache_destroy(cifs_sm_req_cachep);
1618 static int init_mids(void)
1620 cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
1621 sizeof(struct mid_q_entry), 0,
1622 SLAB_HWCACHE_ALIGN, NULL);
1623 if (cifs_mid_cachep == NULL)
1626 /* 3 is a reasonable minimum number of simultaneous operations */
1627 cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
1628 if (cifs_mid_poolp == NULL) {
1629 kmem_cache_destroy(cifs_mid_cachep);
1636 static void destroy_mids(void)
1638 mempool_destroy(cifs_mid_poolp);
1639 kmem_cache_destroy(cifs_mid_cachep);
1647 INIT_LIST_HEAD(&cifs_tcp_ses_list);
1649 * Initialize Global counters
1651 atomic_set(&sesInfoAllocCount, 0);
1652 atomic_set(&tconInfoAllocCount, 0);
1653 atomic_set(&tcpSesNextId, 0);
1654 atomic_set(&tcpSesAllocCount, 0);
1655 atomic_set(&tcpSesReconnectCount, 0);
1656 atomic_set(&tconInfoReconnectCount, 0);
1658 atomic_set(&buf_alloc_count, 0);
1659 atomic_set(&small_buf_alloc_count, 0);
1660 #ifdef CONFIG_CIFS_STATS2
1661 atomic_set(&total_buf_alloc_count, 0);
1662 atomic_set(&total_small_buf_alloc_count, 0);
1663 if (slow_rsp_threshold < 1)
1664 cifs_dbg(FYI, "slow_response_threshold msgs disabled\n");
1665 else if (slow_rsp_threshold > 32767)
1667 "slow response threshold set higher than recommended (0 to 32767)\n");
1668 #endif /* CONFIG_CIFS_STATS2 */
1670 atomic_set(&mid_count, 0);
1671 GlobalCurrentXid = 0;
1672 GlobalTotalActiveXid = 0;
1673 GlobalMaxActiveXid = 0;
1674 spin_lock_init(&cifs_tcp_ses_lock);
1675 spin_lock_init(&GlobalMid_Lock);
1677 cifs_lock_secret = get_random_u32();
1679 if (cifs_max_pending < 2) {
1680 cifs_max_pending = 2;
1681 cifs_dbg(FYI, "cifs_max_pending set to min of 2\n");
1682 } else if (cifs_max_pending > CIFS_MAX_REQ) {
1683 cifs_max_pending = CIFS_MAX_REQ;
1684 cifs_dbg(FYI, "cifs_max_pending set to max of %u\n",
1688 /* Limit max to about 18 hours, and setting to zero disables directory entry caching */
1689 if (dir_cache_timeout > 65000) {
1690 dir_cache_timeout = 65000;
1691 cifs_dbg(VFS, "dir_cache_timeout set to max of 65000 seconds\n");
1694 cifsiod_wq = alloc_workqueue("cifsiod", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1697 goto out_clean_proc;
1701 * Consider in the future setting limit != 0, maybe to min(num_of_cores - 1, 3),
1702 * so that we don't launch too many worker threads, but
1703 * Documentation/core-api/workqueue.rst recommends setting it to 0
1706 /* WQ_UNBOUND allows decrypt tasks to run on any CPU */
1707 decrypt_wq = alloc_workqueue("smb3decryptd",
1708 WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1711 goto out_destroy_cifsiod_wq;
1714 fileinfo_put_wq = alloc_workqueue("cifsfileinfoput",
1715 WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1716 if (!fileinfo_put_wq) {
1718 goto out_destroy_decrypt_wq;
1721 cifsoplockd_wq = alloc_workqueue("cifsoplockd",
1722 WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1723 if (!cifsoplockd_wq) {
1725 goto out_destroy_fileinfo_put_wq;
1728 deferredclose_wq = alloc_workqueue("deferredclose",
1729 WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1730 if (!deferredclose_wq) {
1732 goto out_destroy_cifsoplockd_wq;
1735 rc = cifs_init_inodecache();
1737 goto out_destroy_deferredclose_wq;
1741 goto out_destroy_inodecache;
1743 rc = cifs_init_request_bufs();
1745 goto out_destroy_mids;
1747 #ifdef CONFIG_CIFS_DFS_UPCALL
1748 rc = dfs_cache_init();
1750 goto out_destroy_request_bufs;
1751 #endif /* CONFIG_CIFS_DFS_UPCALL */
1752 #ifdef CONFIG_CIFS_UPCALL
1753 rc = init_cifs_spnego();
1755 goto out_destroy_dfs_cache;
1756 #endif /* CONFIG_CIFS_UPCALL */
1757 #ifdef CONFIG_CIFS_SWN_UPCALL
1758 rc = cifs_genl_init();
1760 goto out_register_key_type;
1761 #endif /* CONFIG_CIFS_SWN_UPCALL */
1763 rc = init_cifs_idmap();
1765 goto out_cifs_swn_init;
1767 rc = register_filesystem(&cifs_fs_type);
1769 goto out_init_cifs_idmap;
1771 rc = register_filesystem(&smb3_fs_type);
1773 unregister_filesystem(&cifs_fs_type);
1774 goto out_init_cifs_idmap;
1779 out_init_cifs_idmap:
1782 #ifdef CONFIG_CIFS_SWN_UPCALL
1784 out_register_key_type:
1786 #ifdef CONFIG_CIFS_UPCALL
1788 out_destroy_dfs_cache:
1790 #ifdef CONFIG_CIFS_DFS_UPCALL
1791 dfs_cache_destroy();
1792 out_destroy_request_bufs:
1794 cifs_destroy_request_bufs();
1797 out_destroy_inodecache:
1798 cifs_destroy_inodecache();
1799 out_destroy_deferredclose_wq:
1800 destroy_workqueue(deferredclose_wq);
1801 out_destroy_cifsoplockd_wq:
1802 destroy_workqueue(cifsoplockd_wq);
1803 out_destroy_fileinfo_put_wq:
1804 destroy_workqueue(fileinfo_put_wq);
1805 out_destroy_decrypt_wq:
1806 destroy_workqueue(decrypt_wq);
1807 out_destroy_cifsiod_wq:
1808 destroy_workqueue(cifsiod_wq);
1817 cifs_dbg(NOISY, "exit_smb3\n");
1818 unregister_filesystem(&cifs_fs_type);
1819 unregister_filesystem(&smb3_fs_type);
1820 cifs_release_automount_timer();
1822 #ifdef CONFIG_CIFS_SWN_UPCALL
1825 #ifdef CONFIG_CIFS_UPCALL
1828 #ifdef CONFIG_CIFS_DFS_UPCALL
1829 dfs_cache_destroy();
1831 cifs_destroy_request_bufs();
1833 cifs_destroy_inodecache();
1834 destroy_workqueue(deferredclose_wq);
1835 destroy_workqueue(cifsoplockd_wq);
1836 destroy_workqueue(decrypt_wq);
1837 destroy_workqueue(fileinfo_put_wq);
1838 destroy_workqueue(cifsiod_wq);
1842 MODULE_AUTHOR("Steve French");
1843 MODULE_LICENSE("GPL"); /* combination of LGPL + GPL source behaves as GPL */
1845 ("VFS to access SMB3 servers e.g. Samba, Macs, Azure and Windows (and "
1846 "also older servers complying with the SNIA CIFS Specification)");
1847 MODULE_VERSION(CIFS_VERSION);
1848 MODULE_SOFTDEP("ecb");
1849 MODULE_SOFTDEP("hmac");
1850 MODULE_SOFTDEP("md5");
1851 MODULE_SOFTDEP("nls");
1852 MODULE_SOFTDEP("aes");
1853 MODULE_SOFTDEP("cmac");
1854 MODULE_SOFTDEP("sha256");
1855 MODULE_SOFTDEP("sha512");
1856 MODULE_SOFTDEP("aead2");
1857 MODULE_SOFTDEP("ccm");
1858 MODULE_SOFTDEP("gcm");
1859 module_init(init_cifs)
1860 module_exit(exit_cifs)