4 * Copyright (C) International Business Machines Corp., 2002,2008
5 * Author(s): Steve French (sfrench@us.ibm.com)
7 * Common Internet FileSystem (CIFS) client
9 * This library is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU Lesser General Public License as published
11 * by the Free Software Foundation; either version 2.1 of the License, or
12 * (at your option) any later version.
14 * This library is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
17 * the GNU Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public License
20 * along with this library; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 /* Note that BB means BUGBUG (i.e. something to fix eventually) */
26 #include <linux/module.h>
28 #include <linux/mount.h>
29 #include <linux/slab.h>
30 #include <linux/init.h>
31 #include <linux/list.h>
32 #include <linux/seq_file.h>
33 #include <linux/vfs.h>
34 #include <linux/mempool.h>
35 #include <linux/delay.h>
36 #include <linux/kthread.h>
37 #include <linux/freezer.h>
38 #include <linux/namei.h>
39 #include <linux/random.h>
40 #include <linux/uuid.h>
41 #include <linux/xattr.h>
45 #define DECLARE_GLOBALS_HERE
47 #include "cifsproto.h"
48 #include "cifs_debug.h"
49 #include "cifs_fs_sb.h"
51 #include <linux/key-type.h>
52 #include "cifs_spnego.h"
55 #ifdef CONFIG_CIFS_DFS_UPCALL
56 #include "dfs_cache.h"
61 bool enable_oplocks = true;
62 bool linuxExtEnabled = true;
63 bool lookupCacheEnabled = true;
64 bool disable_legacy_dialects; /* false by default */
65 unsigned int global_secflags = CIFSSEC_DEF;
66 /* unsigned int ntlmv2_support = 0; */
67 unsigned int sign_CIFS_PDUs = 1;
68 static const struct super_operations cifs_super_ops;
69 unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
70 module_param(CIFSMaxBufSize, uint, 0444);
71 MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header) "
73 "Default: 16384 Range: 8192 to 130048");
74 unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
75 module_param(cifs_min_rcv, uint, 0444);
76 MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
78 unsigned int cifs_min_small = 30;
79 module_param(cifs_min_small, uint, 0444);
80 MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
82 unsigned int cifs_max_pending = CIFS_MAX_REQ;
83 module_param(cifs_max_pending, uint, 0444);
84 MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server for "
85 "CIFS/SMB1 dialect (N/A for SMB3) "
86 "Default: 32767 Range: 2 to 32767.");
87 #ifdef CONFIG_CIFS_STATS2
88 unsigned int slow_rsp_threshold = 1;
89 module_param(slow_rsp_threshold, uint, 0644);
90 MODULE_PARM_DESC(slow_rsp_threshold, "Amount of time (in seconds) to wait "
91 "before logging that a response is delayed. "
92 "Default: 1 (if set to 0 disables msg).");
95 module_param(enable_oplocks, bool, 0644);
96 MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1");
98 module_param(disable_legacy_dialects, bool, 0644);
99 MODULE_PARM_DESC(disable_legacy_dialects, "To improve security it may be "
100 "helpful to restrict the ability to "
101 "override the default dialects (SMB2.1, "
102 "SMB3 and SMB3.02) on mount with old "
103 "dialects (CIFS/SMB1 and SMB2) since "
104 "vers=1.0 (CIFS/SMB1) and vers=2.0 are weaker"
105 " and less secure. Default: n/N/0");
107 extern mempool_t *cifs_sm_req_poolp;
108 extern mempool_t *cifs_req_poolp;
109 extern mempool_t *cifs_mid_poolp;
111 struct workqueue_struct *cifsiod_wq;
112 struct workqueue_struct *cifsoplockd_wq;
113 __u32 cifs_lock_secret;
116 * Bumps refcount for cifs super block.
117 * Note that it should only be called if a reference to the VFS super block is
118 * already held, e.g. in the context of open-type syscalls. Otherwise it can race with
119 * atomic_dec_and_test in deactivate_locked_super.
122 cifs_sb_active(struct super_block *sb)
124 struct cifs_sb_info *server = CIFS_SB(sb);
126 if (atomic_inc_return(&server->active) == 1)
127 atomic_inc(&sb->s_active);
131 cifs_sb_deactive(struct super_block *sb)
133 struct cifs_sb_info *server = CIFS_SB(sb);
135 if (atomic_dec_and_test(&server->active))
136 deactivate_super(sb);
140 cifs_read_super(struct super_block *sb)
143 struct cifs_sb_info *cifs_sb;
144 struct cifs_tcon *tcon;
147 cifs_sb = CIFS_SB(sb);
148 tcon = cifs_sb_master_tcon(cifs_sb);
150 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
151 sb->s_flags |= SB_POSIXACL;
153 if (tcon->snapshot_time)
154 sb->s_flags |= SB_RDONLY;
156 if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)
157 sb->s_maxbytes = MAX_LFS_FILESIZE;
159 sb->s_maxbytes = MAX_NON_LFS;
161 /* BB FIXME fix time_gran to be larger for LANMAN sessions */
162 sb->s_time_gran = 100;
164 sb->s_magic = CIFS_MAGIC_NUMBER;
165 sb->s_op = &cifs_super_ops;
166 sb->s_xattr = cifs_xattr_handlers;
167 rc = super_setup_bdi(sb);
170 /* tune readahead according to rsize */
171 sb->s_bdi->ra_pages = cifs_sb->rsize / PAGE_SIZE;
173 sb->s_blocksize = CIFS_MAX_MSGSIZE;
174 sb->s_blocksize_bits = 14; /* default 2**14 = CIFS_MAX_MSGSIZE */
175 inode = cifs_root_iget(sb);
183 sb->s_d_op = &cifs_ci_dentry_ops;
185 sb->s_d_op = &cifs_dentry_ops;
187 sb->s_root = d_make_root(inode);
193 #ifdef CONFIG_CIFS_NFSD_EXPORT
194 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
195 cifs_dbg(FYI, "export ops supported\n");
196 sb->s_export_op = &cifs_export_ops;
198 #endif /* CONFIG_CIFS_NFSD_EXPORT */
203 cifs_dbg(VFS, "%s: get root inode failed\n", __func__);
207 static void cifs_kill_sb(struct super_block *sb)
209 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
211 cifs_umount(cifs_sb);
215 cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
217 struct super_block *sb = dentry->d_sb;
218 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
219 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
220 struct TCP_Server_Info *server = tcon->ses->server;
226 if (le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength) > 0)
228 le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength);
230 buf->f_namelen = PATH_MAX;
232 buf->f_fsid.val[0] = tcon->vol_serial_number;
233 /* we use part of the create time for more randomness, see man statfs */
234 buf->f_fsid.val[1] = (int)le64_to_cpu(tcon->vol_create_time);
236 buf->f_files = 0; /* undefined */
237 buf->f_ffree = 0; /* unlimited */
239 if (server->ops->queryfs)
240 rc = server->ops->queryfs(xid, tcon, buf);
246 static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len)
248 struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
249 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
250 struct TCP_Server_Info *server = tcon->ses->server;
252 if (server->ops->fallocate)
253 return server->ops->fallocate(file, tcon, mode, off, len);
258 static int cifs_permission(struct inode *inode, int mask)
260 struct cifs_sb_info *cifs_sb;
262 cifs_sb = CIFS_SB(inode->i_sb);
264 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
265 if ((mask & MAY_EXEC) && !execute_ok(inode))
269 } else /* file mode might have been restricted at mount time
270 on the client (above and beyond the ACL on the server) for
271 servers which do not support setting and viewing mode bits,
272 so allowing the client to check permissions is useful */
273 return generic_permission(inode, mask);
276 static struct kmem_cache *cifs_inode_cachep;
277 static struct kmem_cache *cifs_req_cachep;
278 static struct kmem_cache *cifs_mid_cachep;
279 static struct kmem_cache *cifs_sm_req_cachep;
280 mempool_t *cifs_sm_req_poolp;
281 mempool_t *cifs_req_poolp;
282 mempool_t *cifs_mid_poolp;
284 static struct inode *
285 cifs_alloc_inode(struct super_block *sb)
287 struct cifsInodeInfo *cifs_inode;
288 cifs_inode = kmem_cache_alloc(cifs_inode_cachep, GFP_KERNEL);
291 cifs_inode->cifsAttrs = 0x20; /* default */
292 cifs_inode->time = 0;
294 * Until the file is open and we have gotten oplock info back from the
295 * server, we cannot assume caching of file data or metadata.
297 cifs_set_oplock_level(cifs_inode, 0);
298 cifs_inode->flags = 0;
299 spin_lock_init(&cifs_inode->writers_lock);
300 cifs_inode->writers = 0;
301 cifs_inode->vfs_inode.i_blkbits = 14; /* 2**14 = CIFS_MAX_MSGSIZE */
302 cifs_inode->server_eof = 0;
303 cifs_inode->uniqueid = 0;
304 cifs_inode->createtime = 0;
305 cifs_inode->epoch = 0;
306 generate_random_uuid(cifs_inode->lease_key);
309 * Can not set i_flags here - they get immediately overwritten to zero
312 /* cifs_inode->vfs_inode.i_flags = S_NOATIME | S_NOCMTIME; */
313 INIT_LIST_HEAD(&cifs_inode->openFileList);
314 INIT_LIST_HEAD(&cifs_inode->llist);
315 return &cifs_inode->vfs_inode;
318 static void cifs_i_callback(struct rcu_head *head)
320 struct inode *inode = container_of(head, struct inode, i_rcu);
321 kmem_cache_free(cifs_inode_cachep, CIFS_I(inode));
325 cifs_destroy_inode(struct inode *inode)
327 call_rcu(&inode->i_rcu, cifs_i_callback);
331 cifs_evict_inode(struct inode *inode)
333 truncate_inode_pages_final(&inode->i_data);
335 cifs_fscache_release_inode_cookie(inode);
339 cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)
341 struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
342 struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr;
344 seq_puts(s, ",addr=");
346 switch (server->dstaddr.ss_family) {
348 seq_printf(s, "%pI4", &sa->sin_addr.s_addr);
351 seq_printf(s, "%pI6", &sa6->sin6_addr.s6_addr);
352 if (sa6->sin6_scope_id)
353 seq_printf(s, "%%%u", sa6->sin6_scope_id);
356 seq_puts(s, "(unknown)");
359 seq_puts(s, ",rdma");
363 cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
365 if (ses->sectype == Unspecified) {
366 if (ses->user_name == NULL)
367 seq_puts(s, ",sec=none");
371 seq_puts(s, ",sec=");
373 switch (ses->sectype) {
375 seq_puts(s, "lanman");
378 seq_puts(s, "ntlmv2");
384 seq_printf(s, "krb5,cruid=%u", from_kuid_munged(&init_user_ns, ses->cred_uid));
387 seq_puts(s, "ntlmssp");
390 /* shouldn't ever happen */
391 seq_puts(s, "unknown");
400 cifs_show_cache_flavor(struct seq_file *s, struct cifs_sb_info *cifs_sb)
402 seq_puts(s, ",cache=");
404 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
405 seq_puts(s, "strict");
406 else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
409 seq_puts(s, "loose");
413 cifs_show_nls(struct seq_file *s, struct nls_table *cur)
415 struct nls_table *def;
417 /* Display the iocharset= option if it's not the default charset */
418 def = load_nls_default();
420 seq_printf(s, ",iocharset=%s", cur->charset);
425 * cifs_show_options() is for displaying mount options in /proc/mounts.
426 * Not all settable options are displayed but most of the important
430 cifs_show_options(struct seq_file *s, struct dentry *root)
432 struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
433 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
434 struct sockaddr *srcaddr;
435 srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;
437 seq_show_option(s, "vers", tcon->ses->server->vals->version_string);
438 cifs_show_security(s, tcon->ses);
439 cifs_show_cache_flavor(s, cifs_sb);
441 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)
442 seq_puts(s, ",multiuser");
443 else if (tcon->ses->user_name)
444 seq_show_option(s, "username", tcon->ses->user_name);
446 if (tcon->ses->domainName && tcon->ses->domainName[0] != 0)
447 seq_show_option(s, "domain", tcon->ses->domainName);
449 if (srcaddr->sa_family != AF_UNSPEC) {
450 struct sockaddr_in *saddr4;
451 struct sockaddr_in6 *saddr6;
452 saddr4 = (struct sockaddr_in *)srcaddr;
453 saddr6 = (struct sockaddr_in6 *)srcaddr;
454 if (srcaddr->sa_family == AF_INET6)
455 seq_printf(s, ",srcaddr=%pI6c",
457 else if (srcaddr->sa_family == AF_INET)
458 seq_printf(s, ",srcaddr=%pI4",
459 &saddr4->sin_addr.s_addr);
461 seq_printf(s, ",srcaddr=BAD-AF:%i",
462 (int)(srcaddr->sa_family));
465 seq_printf(s, ",uid=%u",
466 from_kuid_munged(&init_user_ns, cifs_sb->mnt_uid));
467 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
468 seq_puts(s, ",forceuid");
470 seq_puts(s, ",noforceuid");
472 seq_printf(s, ",gid=%u",
473 from_kgid_munged(&init_user_ns, cifs_sb->mnt_gid));
474 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
475 seq_puts(s, ",forcegid");
477 seq_puts(s, ",noforcegid");
479 cifs_show_address(s, tcon->ses->server);
482 seq_printf(s, ",file_mode=0%ho,dir_mode=0%ho",
483 cifs_sb->mnt_file_mode,
484 cifs_sb->mnt_dir_mode);
486 cifs_show_nls(s, cifs_sb->local_nls);
489 seq_puts(s, ",seal");
491 seq_puts(s, ",nocase");
493 seq_puts(s, ",hard");
495 seq_puts(s, ",soft");
496 if (tcon->use_persistent)
497 seq_puts(s, ",persistenthandles");
498 else if (tcon->use_resilient)
499 seq_puts(s, ",resilienthandles");
500 if (tcon->posix_extensions)
501 seq_puts(s, ",posix");
502 else if (tcon->unix_ext)
503 seq_puts(s, ",unix");
505 seq_puts(s, ",nounix");
506 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS)
507 seq_puts(s, ",nodfs");
508 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
509 seq_puts(s, ",posixpaths");
510 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
511 seq_puts(s, ",setuids");
512 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
513 seq_puts(s, ",idsfromsid");
514 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
515 seq_puts(s, ",serverino");
516 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
517 seq_puts(s, ",rwpidforward");
518 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL)
519 seq_puts(s, ",forcemand");
520 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
521 seq_puts(s, ",nouser_xattr");
522 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
523 seq_puts(s, ",mapchars");
524 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR)
525 seq_puts(s, ",mapposix");
526 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
528 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
529 seq_puts(s, ",nobrl");
530 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_HANDLE_CACHE)
531 seq_puts(s, ",nohandlecache");
532 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
533 seq_puts(s, ",cifsacl");
534 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
535 seq_puts(s, ",dynperm");
536 if (root->d_sb->s_flags & SB_POSIXACL)
538 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
539 seq_puts(s, ",mfsymlinks");
540 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
542 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)
543 seq_puts(s, ",nostrictsync");
544 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
545 seq_puts(s, ",noperm");
546 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID)
547 seq_printf(s, ",backupuid=%u",
548 from_kuid_munged(&init_user_ns,
549 cifs_sb->mnt_backupuid));
550 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID)
551 seq_printf(s, ",backupgid=%u",
552 from_kgid_munged(&init_user_ns,
553 cifs_sb->mnt_backupgid));
555 seq_printf(s, ",rsize=%u", cifs_sb->rsize);
556 seq_printf(s, ",wsize=%u", cifs_sb->wsize);
557 seq_printf(s, ",bsize=%u", cifs_sb->bsize);
558 seq_printf(s, ",echo_interval=%lu",
559 tcon->ses->server->echo_interval / HZ);
560 if (tcon->snapshot_time)
561 seq_printf(s, ",snapshot=%llu", tcon->snapshot_time);
562 /* convert actimeo and display it in seconds */
563 seq_printf(s, ",actimeo=%lu", cifs_sb->actimeo / HZ);
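/*
 * Illustrative example (hypothetical values, not taken from a real system):
 * the options emitted above appear in /proc/mounts, so a cifs entry might
 * look roughly like:
 *
 *   //server/share /mnt cifs rw,vers=3.0,sec=ntlmssp,cache=strict,
 *   username=user,uid=0,noforceuid,gid=0,noforcegid,addr=192.0.2.10,
 *   file_mode=0755,dir_mode=0755,soft,nounix,serverino,mapposix,
 *   rsize=1048576,wsize=1048576,bsize=1048576,echo_interval=60,actimeo=1 0 0
 */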
568 static void cifs_umount_begin(struct super_block *sb)
570 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
571 struct cifs_tcon *tcon;
576 tcon = cifs_sb_master_tcon(cifs_sb);
578 spin_lock(&cifs_tcp_ses_lock);
579 if ((tcon->tc_count > 1) || (tcon->tidStatus == CifsExiting)) {
580 /* we have other mounts to the same share, or we have
581 already tried to force umount this and woken up
582 all waiting network requests; nothing to do */
583 spin_unlock(&cifs_tcp_ses_lock);
585 } else if (tcon->tc_count == 1)
586 tcon->tidStatus = CifsExiting;
587 spin_unlock(&cifs_tcp_ses_lock);
589 /* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
590 /* cancel_notify_requests(tcon); */
591 if (tcon->ses && tcon->ses->server) {
592 cifs_dbg(FYI, "wake up tasks now - umount begin not complete\n");
593 wake_up_all(&tcon->ses->server->request_q);
594 wake_up_all(&tcon->ses->server->response_q);
595 msleep(1); /* yield */
596 /* we have to kick the requests once more */
597 wake_up_all(&tcon->ses->server->response_q);
604 #ifdef CONFIG_CIFS_STATS2
605 static int cifs_show_stats(struct seq_file *s, struct dentry *root)
612 static int cifs_remount(struct super_block *sb, int *flags, char *data)
615 *flags |= SB_NODIRATIME;
619 static int cifs_drop_inode(struct inode *inode)
621 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
623 /* no serverino => unconditional eviction */
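/*
 * Without server inode numbers there is no reliable way to tell whether a
 * cached inode still matches the object on the server, so unused inodes are
 * evicted immediately; with serverino, defer to the normal
 * generic_drop_inode() policy.
 */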
624 return !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) ||
625 generic_drop_inode(inode);
628 static const struct super_operations cifs_super_ops = {
629 .statfs = cifs_statfs,
630 .alloc_inode = cifs_alloc_inode,
631 .destroy_inode = cifs_destroy_inode,
632 .drop_inode = cifs_drop_inode,
633 .evict_inode = cifs_evict_inode,
634 /* .delete_inode = cifs_delete_inode, */ /* Do not need above
635 function unless later we add lazy close of inodes or unless the
636 kernel forgets to call us with the same number of releases (closes)
638 .show_options = cifs_show_options,
639 .umount_begin = cifs_umount_begin,
640 .remount_fs = cifs_remount,
641 #ifdef CONFIG_CIFS_STATS2
642 .show_stats = cifs_show_stats,
647 * Get root dentry from superblock according to prefix path mount option.
648 * Return dentry with refcount + 1 on success and an ERR_PTR otherwise.
650 static struct dentry *
651 cifs_get_root(struct smb_vol *vol, struct super_block *sb)
653 struct dentry *dentry;
654 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
655 char *full_path = NULL;
659 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
660 return dget(sb->s_root);
662 full_path = cifs_build_path_to_root(vol, cifs_sb,
663 cifs_sb_master_tcon(cifs_sb), 0);
664 if (full_path == NULL)
665 return ERR_PTR(-ENOMEM);
667 cifs_dbg(FYI, "Get root dentry for %s\n", full_path);
669 sep = CIFS_DIR_SEP(cifs_sb);
670 dentry = dget(sb->s_root);
674 struct inode *dir = d_inode(dentry);
675 struct dentry *child;
679 dentry = ERR_PTR(-ENOENT);
682 if (!S_ISDIR(dir->i_mode)) {
684 dentry = ERR_PTR(-ENOTDIR);
688 /* skip separators */
695 while (*s && *s != sep)
698 child = lookup_one_len_unlocked(p, dentry, s - p);
701 } while (!IS_ERR(dentry));
706 static int cifs_set_super(struct super_block *sb, void *data)
708 struct cifs_mnt_data *mnt_data = data;
709 sb->s_fs_info = mnt_data->cifs_sb;
710 return set_anon_super(sb, NULL);
713 static struct dentry *
714 cifs_smb3_do_mount(struct file_system_type *fs_type,
715 int flags, const char *dev_name, void *data, bool is_smb3)
718 struct super_block *sb;
719 struct cifs_sb_info *cifs_sb;
720 struct smb_vol *volume_info;
721 struct cifs_mnt_data mnt_data;
725 * Print the attempted mount operation in the kernel / CIFS log
726 * if CIFS_DEBUG && cifs_FYI
729 cifs_dbg(FYI, "Devname: %s flags: %d\n", dev_name, flags);
731 cifs_info("Attempting to mount %s\n", dev_name);
733 volume_info = cifs_get_volume_info((char *)data, dev_name, is_smb3);
734 if (IS_ERR(volume_info))
735 return ERR_CAST(volume_info);
737 cifs_sb = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
738 if (cifs_sb == NULL) {
739 root = ERR_PTR(-ENOMEM);
743 cifs_sb->mountdata = kstrndup(data, PAGE_SIZE, GFP_KERNEL);
744 if (cifs_sb->mountdata == NULL) {
745 root = ERR_PTR(-ENOMEM);
749 rc = cifs_setup_cifs_sb(volume_info, cifs_sb);
755 rc = cifs_mount(cifs_sb, volume_info);
757 if (!(flags & SB_SILENT))
758 cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",
764 mnt_data.vol = volume_info;
765 mnt_data.cifs_sb = cifs_sb;
766 mnt_data.flags = flags;
768 /* BB should we make this contingent on mount parm? */
769 flags |= SB_NODIRATIME | SB_NOATIME;
771 sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data);
774 cifs_umount(cifs_sb);
779 cifs_dbg(FYI, "Use existing superblock\n");
780 cifs_umount(cifs_sb);
782 rc = cifs_read_super(sb);
788 sb->s_flags |= SB_ACTIVE;
791 root = cifs_get_root(volume_info, sb);
795 cifs_dbg(FYI, "dentry root is: %p\n", root);
799 deactivate_locked_super(sb);
801 cifs_cleanup_volume_info(volume_info);
805 kfree(cifs_sb->prepath);
806 kfree(cifs_sb->mountdata);
809 unload_nls(volume_info->local_nls);
813 static struct dentry *
814 smb3_do_mount(struct file_system_type *fs_type,
815 int flags, const char *dev_name, void *data)
817 return cifs_smb3_do_mount(fs_type, flags, dev_name, data, true);
820 static struct dentry *
821 cifs_do_mount(struct file_system_type *fs_type,
822 int flags, const char *dev_name, void *data)
824 return cifs_smb3_do_mount(fs_type, flags, dev_name, data, false);
828 cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
831 struct inode *inode = file_inode(iocb->ki_filp);
833 if (iocb->ki_filp->f_flags & O_DIRECT)
834 return cifs_user_readv(iocb, iter);
836 rc = cifs_revalidate_mapping(inode);
840 return generic_file_read_iter(iocb, iter);
843 static ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
845 struct inode *inode = file_inode(iocb->ki_filp);
846 struct cifsInodeInfo *cinode = CIFS_I(inode);
850 if (iocb->ki_filp->f_flags & O_DIRECT) {
851 written = cifs_user_writev(iocb, from);
852 if (written > 0 && CIFS_CACHE_READ(cinode)) {
853 cifs_zap_mapping(inode);
855 "Set no oplock for inode=%p after a write operation\n",
862 written = cifs_get_writer(cinode);
866 written = generic_file_write_iter(iocb, from);
868 if (CIFS_CACHE_WRITE(CIFS_I(inode)))
871 rc = filemap_fdatawrite(inode->i_mapping);
873 cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
877 cifs_put_writer(cinode);
881 static loff_t cifs_llseek(struct file *file, loff_t offset, int whence)
884 * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
885 * the cached file length
887 if (whence != SEEK_SET && whence != SEEK_CUR) {
889 struct inode *inode = file_inode(file);
892 * We need to be sure that all dirty pages are written and the
893 * server has the newest file length.
895 if (!CIFS_CACHE_READ(CIFS_I(inode)) && inode->i_mapping &&
896 inode->i_mapping->nrpages != 0) {
897 rc = filemap_fdatawait(inode->i_mapping);
899 mapping_set_error(inode->i_mapping, rc);
904 * Some applications poll for the file length in this strange
905 * way so we must seek to end on non-oplocked files by
906 * setting the revalidate time to zero.
908 CIFS_I(inode)->time = 0;
910 rc = cifs_revalidate_file_attr(file);
914 return generic_file_llseek(file, offset, whence);
918 cifs_setlease(struct file *file, long arg, struct file_lock **lease, void **priv)
921 * Note that this is called by vfs setlease with i_lock held to
922 * protect *lease from going away.
924 struct inode *inode = file_inode(file);
925 struct cifsFileInfo *cfile = file->private_data;
927 if (!(S_ISREG(inode->i_mode)))
930 /* Check if file is oplocked if this is request for new lease */
931 if (arg == F_UNLCK ||
932 ((arg == F_RDLCK) && CIFS_CACHE_READ(CIFS_I(inode))) ||
933 ((arg == F_WRLCK) && CIFS_CACHE_WRITE(CIFS_I(inode))))
934 return generic_setlease(file, arg, lease, priv);
935 else if (tlink_tcon(cfile->tlink)->local_lease &&
936 !CIFS_CACHE_READ(CIFS_I(inode)))
938 * If the server claims to support oplock on this file, then we
939 * still need to check oplock even if the local_lease mount
940 * option is set, but there are servers which do not support
941 * oplock for which this mount option may be useful if the user
942 * knows that the file won't be changed on the server by anyone
945 return generic_setlease(file, arg, lease, priv);
950 struct file_system_type cifs_fs_type = {
951 .owner = THIS_MODULE,
953 .mount = cifs_do_mount,
954 .kill_sb = cifs_kill_sb,
957 MODULE_ALIAS_FS("cifs");
959 static struct file_system_type smb3_fs_type = {
960 .owner = THIS_MODULE,
962 .mount = smb3_do_mount,
963 .kill_sb = cifs_kill_sb,
966 MODULE_ALIAS_FS("smb3");
967 MODULE_ALIAS("smb3");
969 const struct inode_operations cifs_dir_inode_ops = {
970 .create = cifs_create,
971 .atomic_open = cifs_atomic_open,
972 .lookup = cifs_lookup,
973 .getattr = cifs_getattr,
974 .unlink = cifs_unlink,
975 .link = cifs_hardlink,
978 .rename = cifs_rename2,
979 .permission = cifs_permission,
980 .setattr = cifs_setattr,
981 .symlink = cifs_symlink,
983 .listxattr = cifs_listxattr,
986 const struct inode_operations cifs_file_inode_ops = {
987 .setattr = cifs_setattr,
988 .getattr = cifs_getattr,
989 .permission = cifs_permission,
990 .listxattr = cifs_listxattr,
993 const struct inode_operations cifs_symlink_inode_ops = {
994 .get_link = cifs_get_link,
995 .permission = cifs_permission,
996 .listxattr = cifs_listxattr,
999 static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
1000 struct file *dst_file, loff_t destoff, loff_t len,
1001 unsigned int remap_flags)
1003 struct inode *src_inode = file_inode(src_file);
1004 struct inode *target_inode = file_inode(dst_file);
1005 struct cifsFileInfo *smb_file_src = src_file->private_data;
1006 struct cifsFileInfo *smb_file_target;
1007 struct cifs_tcon *target_tcon;
1011 if (remap_flags & ~REMAP_FILE_ADVISORY)
1014 cifs_dbg(FYI, "clone range\n");
1018 if (!src_file->private_data || !dst_file->private_data) {
1020 cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
1024 smb_file_target = dst_file->private_data;
1025 target_tcon = tlink_tcon(smb_file_target->tlink);
1028 * Note: the cifs case is easier than btrfs since the server is responsible
1029 * for checking proper open modes and file type, and if it wants the
1030 * server could even support copying a range where source = target
1032 lock_two_nondirectories(target_inode, src_inode);
1035 len = src_inode->i_size - off;
1037 cifs_dbg(FYI, "about to flush pages\n");
1038 /* should we flush the first and last pages first? */
1039 truncate_inode_pages_range(&target_inode->i_data, destoff,
1040 PAGE_ALIGN(destoff + len)-1);
1042 if (target_tcon->ses->server->ops->duplicate_extents)
1043 rc = target_tcon->ses->server->ops->duplicate_extents(xid,
1044 smb_file_src, smb_file_target, off, len, destoff);
1048 /* force revalidate of size and timestamps of target file now
1049 that target is updated on the server */
1050 CIFS_I(target_inode)->time = 0;
1051 /* although unlocking in the reverse order from locking is not
1052 strictly necessary here, it is a little cleaner to be consistent */
1053 unlock_two_nondirectories(src_inode, target_inode);
1056 return rc < 0 ? rc : len;
1059 ssize_t cifs_file_copychunk_range(unsigned int xid,
1060 struct file *src_file, loff_t off,
1061 struct file *dst_file, loff_t destoff,
1062 size_t len, unsigned int flags)
1064 struct inode *src_inode = file_inode(src_file);
1065 struct inode *target_inode = file_inode(dst_file);
1066 struct cifsFileInfo *smb_file_src;
1067 struct cifsFileInfo *smb_file_target;
1068 struct cifs_tcon *src_tcon;
1069 struct cifs_tcon *target_tcon;
1072 cifs_dbg(FYI, "copychunk range\n");
1074 if (src_inode == target_inode) {
1079 if (!src_file->private_data || !dst_file->private_data) {
1081 cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
1086 smb_file_target = dst_file->private_data;
1087 smb_file_src = src_file->private_data;
1088 src_tcon = tlink_tcon(smb_file_src->tlink);
1089 target_tcon = tlink_tcon(smb_file_target->tlink);
1091 if (src_tcon->ses != target_tcon->ses) {
1092 cifs_dbg(VFS, "source and target of copy not on same server\n");
1097 * Note: the cifs case is easier than btrfs since the server is responsible
1098 * for checking proper open modes and file type, and if it wants the
1099 * server could even support copying a range where source = target
1101 lock_two_nondirectories(target_inode, src_inode);
1103 cifs_dbg(FYI, "about to flush pages\n");
1104 /* should we flush the first and last pages first? */
1105 truncate_inode_pages(&target_inode->i_data, 0);
1107 if (target_tcon->ses->server->ops->copychunk_range)
1108 rc = target_tcon->ses->server->ops->copychunk_range(xid,
1109 smb_file_src, smb_file_target, off, len, destoff);
1113 /* force revalidate of size and timestamps of target file now
1114 * that target is updated on the server
1116 CIFS_I(target_inode)->time = 0;
1117 /* although unlocking in the reverse order from locking is not
1118 * strictly necessary here, it is a little cleaner to be consistent
1120 unlock_two_nondirectories(src_inode, target_inode);
1127 * Directory operations under CIFS/SMB2/SMB3 are synchronous, so fsync()
1128 * is a dummy operation.
1130 static int cifs_dir_fsync(struct file *file, loff_t start, loff_t end, int datasync)
1132 cifs_dbg(FYI, "Sync directory - name: %pD datasync: 0x%x\n",
1138 static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off,
1139 struct file *dst_file, loff_t destoff,
1140 size_t len, unsigned int flags)
1142 unsigned int xid = get_xid();
1145 rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff,
1151 const struct file_operations cifs_file_ops = {
1152 .read_iter = cifs_loose_read_iter,
1153 .write_iter = cifs_file_write_iter,
1155 .release = cifs_close,
1157 .fsync = cifs_fsync,
1158 .flush = cifs_flush,
1159 .mmap = cifs_file_mmap,
1160 .splice_read = generic_file_splice_read,
1161 .splice_write = iter_file_splice_write,
1162 .llseek = cifs_llseek,
1163 .unlocked_ioctl = cifs_ioctl,
1164 .copy_file_range = cifs_copy_file_range,
1165 .remap_file_range = cifs_remap_file_range,
1166 .setlease = cifs_setlease,
1167 .fallocate = cifs_fallocate,
1170 const struct file_operations cifs_file_strict_ops = {
1171 .read_iter = cifs_strict_readv,
1172 .write_iter = cifs_strict_writev,
1174 .release = cifs_close,
1176 .fsync = cifs_strict_fsync,
1177 .flush = cifs_flush,
1178 .mmap = cifs_file_strict_mmap,
1179 .splice_read = generic_file_splice_read,
1180 .splice_write = iter_file_splice_write,
1181 .llseek = cifs_llseek,
1182 .unlocked_ioctl = cifs_ioctl,
1183 .copy_file_range = cifs_copy_file_range,
1184 .remap_file_range = cifs_remap_file_range,
1185 .setlease = cifs_setlease,
1186 .fallocate = cifs_fallocate,
1189 const struct file_operations cifs_file_direct_ops = {
1190 .read_iter = cifs_direct_readv,
1191 .write_iter = cifs_direct_writev,
1193 .release = cifs_close,
1195 .fsync = cifs_fsync,
1196 .flush = cifs_flush,
1197 .mmap = cifs_file_mmap,
1198 .splice_read = generic_file_splice_read,
1199 .splice_write = iter_file_splice_write,
1200 .unlocked_ioctl = cifs_ioctl,
1201 .copy_file_range = cifs_copy_file_range,
1202 .remap_file_range = cifs_remap_file_range,
1203 .llseek = cifs_llseek,
1204 .setlease = cifs_setlease,
1205 .fallocate = cifs_fallocate,
1208 const struct file_operations cifs_file_nobrl_ops = {
1209 .read_iter = cifs_loose_read_iter,
1210 .write_iter = cifs_file_write_iter,
1212 .release = cifs_close,
1213 .fsync = cifs_fsync,
1214 .flush = cifs_flush,
1215 .mmap = cifs_file_mmap,
1216 .splice_read = generic_file_splice_read,
1217 .splice_write = iter_file_splice_write,
1218 .llseek = cifs_llseek,
1219 .unlocked_ioctl = cifs_ioctl,
1220 .copy_file_range = cifs_copy_file_range,
1221 .remap_file_range = cifs_remap_file_range,
1222 .setlease = cifs_setlease,
1223 .fallocate = cifs_fallocate,
1226 const struct file_operations cifs_file_strict_nobrl_ops = {
1227 .read_iter = cifs_strict_readv,
1228 .write_iter = cifs_strict_writev,
1230 .release = cifs_close,
1231 .fsync = cifs_strict_fsync,
1232 .flush = cifs_flush,
1233 .mmap = cifs_file_strict_mmap,
1234 .splice_read = generic_file_splice_read,
1235 .splice_write = iter_file_splice_write,
1236 .llseek = cifs_llseek,
1237 .unlocked_ioctl = cifs_ioctl,
1238 .copy_file_range = cifs_copy_file_range,
1239 .remap_file_range = cifs_remap_file_range,
1240 .setlease = cifs_setlease,
1241 .fallocate = cifs_fallocate,
1244 const struct file_operations cifs_file_direct_nobrl_ops = {
1245 .read_iter = cifs_direct_readv,
1246 .write_iter = cifs_direct_writev,
1248 .release = cifs_close,
1249 .fsync = cifs_fsync,
1250 .flush = cifs_flush,
1251 .mmap = cifs_file_mmap,
1252 .splice_read = generic_file_splice_read,
1253 .splice_write = iter_file_splice_write,
1254 .unlocked_ioctl = cifs_ioctl,
1255 .copy_file_range = cifs_copy_file_range,
1256 .remap_file_range = cifs_remap_file_range,
1257 .llseek = cifs_llseek,
1258 .setlease = cifs_setlease,
1259 .fallocate = cifs_fallocate,
1262 const struct file_operations cifs_dir_ops = {
1263 .iterate_shared = cifs_readdir,
1264 .release = cifs_closedir,
1265 .read = generic_read_dir,
1266 .unlocked_ioctl = cifs_ioctl,
1267 .copy_file_range = cifs_copy_file_range,
1268 .remap_file_range = cifs_remap_file_range,
1269 .llseek = generic_file_llseek,
1270 .fsync = cifs_dir_fsync,
1274 cifs_init_once(void *inode)
1276 struct cifsInodeInfo *cifsi = inode;
1278 inode_init_once(&cifsi->vfs_inode);
1279 init_rwsem(&cifsi->lock_sem);
1283 cifs_init_inodecache(void)
1285 cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
1286 sizeof(struct cifsInodeInfo),
1287 0, (SLAB_RECLAIM_ACCOUNT|
1288 SLAB_MEM_SPREAD|SLAB_ACCOUNT),
1290 if (cifs_inode_cachep == NULL)
1297 cifs_destroy_inodecache(void)
1300 * Make sure all delayed rcu free inodes are flushed before we destroy the inode cache.
1304 kmem_cache_destroy(cifs_inode_cachep);
1308 cifs_init_request_bufs(void)
1311 * The SMB2 maximum header size is bigger than the CIFS one, so it is
1312 * no problem to allocate a few more bytes for CIFS.
1314 size_t max_hdr_size = MAX_SMB2_HDR_SIZE;
1316 if (CIFSMaxBufSize < 8192) {
1317 /* The buffer size cannot be smaller than 2 * PATH_MAX since the maximum
1318 Unicode path name has to fit in any SMB/CIFS path-based frame */
1319 CIFSMaxBufSize = 8192;
1320 } else if (CIFSMaxBufSize > 1024*127) {
1321 CIFSMaxBufSize = 1024 * 127;
1323 CIFSMaxBufSize &= 0x1FE00; /* round size down to a multiple of 512 bytes */
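/*
 * The mask above clears the low nine bits, i.e. it rounds the clamped
 * value down to a multiple of 512: for example, CIFSMaxBufSize=9000
 * becomes 8704, while the default of 16384 is left unchanged.
 */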
1326 cifs_dbg(VFS, "CIFSMaxBufSize %d 0x%x\n",
1327 CIFSMaxBufSize, CIFSMaxBufSize);
1329 cifs_req_cachep = kmem_cache_create_usercopy("cifs_request",
1330 CIFSMaxBufSize + max_hdr_size, 0,
1331 SLAB_HWCACHE_ALIGN, 0,
1332 CIFSMaxBufSize + max_hdr_size,
1334 if (cifs_req_cachep == NULL)
1337 if (cifs_min_rcv < 1)
1339 else if (cifs_min_rcv > 64) {
1341 cifs_dbg(VFS, "cifs_min_rcv set to maximum (64)\n");
1344 cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
1347 if (cifs_req_poolp == NULL) {
1348 kmem_cache_destroy(cifs_req_cachep);
1351 /* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
1352 almost all handle-based requests (but not write responses, nor is it
1353 sufficient for path-based requests). A smaller size would have
1354 been more efficient (compacting multiple slab items on one 4k page)
1355 for the case in which debugging was on, but this larger size allows
1356 more SMBs to use the small buffer alloc and is still much more
1357 efficient to alloc one per page off the slab compared to the 17K (5 page)
1358 alloc of large cifs buffers even when page debugging is on */
1359 cifs_sm_req_cachep = kmem_cache_create_usercopy("cifs_small_rq",
1360 MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
1361 0, MAX_CIFS_SMALL_BUFFER_SIZE, NULL);
1362 if (cifs_sm_req_cachep == NULL) {
1363 mempool_destroy(cifs_req_poolp);
1364 kmem_cache_destroy(cifs_req_cachep);
1368 if (cifs_min_small < 2)
1370 else if (cifs_min_small > 256) {
1371 cifs_min_small = 256;
1372 cifs_dbg(FYI, "cifs_min_small set to maximum (256)\n");
1375 cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
1376 cifs_sm_req_cachep);
1378 if (cifs_sm_req_poolp == NULL) {
1379 mempool_destroy(cifs_req_poolp);
1380 kmem_cache_destroy(cifs_req_cachep);
1381 kmem_cache_destroy(cifs_sm_req_cachep);
1389 cifs_destroy_request_bufs(void)
1391 mempool_destroy(cifs_req_poolp);
1392 kmem_cache_destroy(cifs_req_cachep);
1393 mempool_destroy(cifs_sm_req_poolp);
1394 kmem_cache_destroy(cifs_sm_req_cachep);
1398 cifs_init_mids(void)
1400 cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
1401 sizeof(struct mid_q_entry), 0,
1402 SLAB_HWCACHE_ALIGN, NULL);
1403 if (cifs_mid_cachep == NULL)
1406 /* 3 is a reasonable minimum number of simultaneous operations */
1407 cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
1408 if (cifs_mid_poolp == NULL) {
1409 kmem_cache_destroy(cifs_mid_cachep);
1417 cifs_destroy_mids(void)
1419 mempool_destroy(cifs_mid_poolp);
1420 kmem_cache_destroy(cifs_mid_cachep);
1428 INIT_LIST_HEAD(&cifs_tcp_ses_list);
1429 #ifdef CONFIG_CIFS_DNOTIFY_EXPERIMENTAL /* unused temporarily */
1430 INIT_LIST_HEAD(&GlobalDnotifyReqList);
1431 INIT_LIST_HEAD(&GlobalDnotifyRsp_Q);
1432 #endif /* was needed for dnotify, and will be needed for inotify when the VFS is fixed */
1434 * Initialize Global counters
1436 atomic_set(&sesInfoAllocCount, 0);
1437 atomic_set(&tconInfoAllocCount, 0);
1438 atomic_set(&tcpSesAllocCount, 0);
1439 atomic_set(&tcpSesReconnectCount, 0);
1440 atomic_set(&tconInfoReconnectCount, 0);
1442 atomic_set(&bufAllocCount, 0);
1443 atomic_set(&smBufAllocCount, 0);
1444 #ifdef CONFIG_CIFS_STATS2
1445 atomic_set(&totBufAllocCount, 0);
1446 atomic_set(&totSmBufAllocCount, 0);
1447 if (slow_rsp_threshold < 1)
1448 cifs_dbg(FYI, "slow_response_threshold msgs disabled\n");
1449 else if (slow_rsp_threshold > 32767)
1451 "slow response threshold set higher than recommended (0 to 32767)\n");
1452 #endif /* CONFIG_CIFS_STATS2 */
1454 atomic_set(&midCount, 0);
1455 GlobalCurrentXid = 0;
1456 GlobalTotalActiveXid = 0;
1457 GlobalMaxActiveXid = 0;
1458 spin_lock_init(&cifs_tcp_ses_lock);
1459 spin_lock_init(&GlobalMid_Lock);
1461 cifs_lock_secret = get_random_u32();
1463 if (cifs_max_pending < 2) {
1464 cifs_max_pending = 2;
1465 cifs_dbg(FYI, "cifs_max_pending set to min of 2\n");
1466 } else if (cifs_max_pending > CIFS_MAX_REQ) {
1467 cifs_max_pending = CIFS_MAX_REQ;
1468 cifs_dbg(FYI, "cifs_max_pending set to max of %u\n",
1472 cifsiod_wq = alloc_workqueue("cifsiod", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1475 goto out_clean_proc;
1478 cifsoplockd_wq = alloc_workqueue("cifsoplockd",
1479 WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1480 if (!cifsoplockd_wq) {
1482 goto out_destroy_cifsiod_wq;
1485 rc = cifs_fscache_register();
1487 goto out_destroy_cifsoplockd_wq;
1489 rc = cifs_init_inodecache();
1491 goto out_unreg_fscache;
1493 rc = cifs_init_mids();
1495 goto out_destroy_inodecache;
1497 rc = cifs_init_request_bufs();
1499 goto out_destroy_mids;
1501 #ifdef CONFIG_CIFS_DFS_UPCALL
1502 rc = dfs_cache_init();
1504 goto out_destroy_request_bufs;
1505 #endif /* CONFIG_CIFS_DFS_UPCALL */
1506 #ifdef CONFIG_CIFS_UPCALL
1507 rc = init_cifs_spnego();
1509 goto out_destroy_dfs_cache;
1510 #endif /* CONFIG_CIFS_UPCALL */
1512 #ifdef CONFIG_CIFS_ACL
1513 rc = init_cifs_idmap();
1515 goto out_register_key_type;
1516 #endif /* CONFIG_CIFS_ACL */
1518 rc = register_filesystem(&cifs_fs_type);
1520 goto out_init_cifs_idmap;
1522 rc = register_filesystem(&smb3_fs_type);
1524 unregister_filesystem(&cifs_fs_type);
1525 goto out_init_cifs_idmap;
1530 out_init_cifs_idmap:
1531 #ifdef CONFIG_CIFS_ACL
1533 out_register_key_type:
1535 #ifdef CONFIG_CIFS_UPCALL
1537 out_destroy_dfs_cache:
1539 #ifdef CONFIG_CIFS_DFS_UPCALL
1540 dfs_cache_destroy();
1541 out_destroy_request_bufs:
1543 cifs_destroy_request_bufs();
1545 cifs_destroy_mids();
1546 out_destroy_inodecache:
1547 cifs_destroy_inodecache();
1549 cifs_fscache_unregister();
1550 out_destroy_cifsoplockd_wq:
1551 destroy_workqueue(cifsoplockd_wq);
1552 out_destroy_cifsiod_wq:
1553 destroy_workqueue(cifsiod_wq);
1562 cifs_dbg(NOISY, "exit_smb3\n");
1563 unregister_filesystem(&cifs_fs_type);
1564 unregister_filesystem(&smb3_fs_type);
1565 cifs_dfs_release_automount_timer();
1566 #ifdef CONFIG_CIFS_ACL
1569 #ifdef CONFIG_CIFS_UPCALL
1572 #ifdef CONFIG_CIFS_DFS_UPCALL
1573 dfs_cache_destroy();
1575 cifs_destroy_request_bufs();
1576 cifs_destroy_mids();
1577 cifs_destroy_inodecache();
1578 cifs_fscache_unregister();
1579 destroy_workqueue(cifsoplockd_wq);
1580 destroy_workqueue(cifsiod_wq);
1584 MODULE_AUTHOR("Steve French");
1585 MODULE_LICENSE("GPL"); /* combination of LGPL + GPL source behaves as GPL */
1587 ("VFS to access SMB3 servers e.g. Samba, Macs, Azure and Windows (and "
1588 "also older servers complying with the SNIA CIFS Specification)");
1589 MODULE_VERSION(CIFS_VERSION);
1590 MODULE_SOFTDEP("pre: arc4");
1591 MODULE_SOFTDEP("pre: des");
1592 MODULE_SOFTDEP("pre: ecb");
1593 MODULE_SOFTDEP("pre: hmac");
1594 MODULE_SOFTDEP("pre: md4");
1595 MODULE_SOFTDEP("pre: md5");
1596 MODULE_SOFTDEP("pre: nls");
1597 MODULE_SOFTDEP("pre: aes");
1598 MODULE_SOFTDEP("pre: cmac");
1599 MODULE_SOFTDEP("pre: sha256");
1600 MODULE_SOFTDEP("pre: sha512");
1601 MODULE_SOFTDEP("pre: aead2");
1602 MODULE_SOFTDEP("pre: ccm");
1603 module_init(init_cifs)
1604 module_exit(exit_cifs)