1 // SPDX-License-Identifier: GPL-2.0
3 * DFS referral cache routines
5 * Copyright (c) 2018-2019 Paulo Alcantara <palcantara@suse.de>
8 #include <linux/jhash.h>
9 #include <linux/ktime.h>
10 #include <linux/slab.h>
11 #include <linux/proc_fs.h>
12 #include <linux/nls.h>
13 #include <linux/workqueue.h>
16 #include "smb2proto.h"
17 #include "cifsproto.h"
18 #include "cifs_debug.h"
19 #include "cifs_unicode.h"
21 #include "fs_context.h"
23 #include "dfs_cache.h"
/* Size of the cache hash table (power of two; see cache_entry_hash()). */
25 #define CACHE_HTABLE_SIZE 32
/* Hard cap on cached referral entries; oldest entry is evicted when reached. */
26 #define CACHE_MAX_ENTRIES 64
/* An interlink referral has either the referral-server or storage-server header flag set. */
28 #define IS_INTERLINK_SET(v) ((v) & (DFSREF_REFERRAL_SERVER | \
29 DFSREF_STORAGE_SERVER))
/* One DFS target node of a cache entry (fields partially elided in this listing). */
31 struct cache_dfs_tgt {
34 struct list_head list;
/* NOTE(review): the following fields appear to belong to struct cache_entry;
 * the struct header line is missing from this listing — confirm against full source. */
38 struct hlist_node hlist;
40 int hdr_flags; /* RESP_GET_DFS_REFERRAL.ReferralHeaderFlags */
41 int ttl; /* DFS_REREFERRAL_V3.TimeToLive */
42 int srvtype; /* DFS_REREFERRAL_V3.ServerType */
43 int ref_flags; /* DFS_REREFERRAL_V3.ReferralEntryFlags */
44 struct timespec64 etime;
45 int path_consumed; /* RESP_GET_DFS_REFERRAL.PathConsumed */
47 struct list_head tlist;
48 struct cache_dfs_tgt *tgthint;
/* NOTE(review): fields below presumably belong to struct vol_info — confirm. */
54 struct smb3_fs_context ctx;
56 struct list_head list;
57 struct list_head rlist;
/* Slab for cache_entry allocations and the cache's dedicated workqueue. */
61 static struct kmem_cache *cache_slab __read_mostly;
62 static struct workqueue_struct *dfscache_wq __read_mostly;
/* Protects cache_ttl (lowest TTL used to schedule refresh_task). */
65 static DEFINE_SPINLOCK(cache_ttl_lock);
/* Default NLS table used when refreshing entries from the worker. */
67 static struct nls_table *cache_nlsc;
70 * Number of entries in the cache
72 static atomic_t cache_count;
/* Hash table of cache entries, guarded by htable_rw_lock. */
74 static struct hlist_head cache_htable[CACHE_HTABLE_SIZE];
75 static DECLARE_RWSEM(htable_rw_lock);
/* List of mounted volumes to refresh, guarded by vol_list_lock. */
77 static LIST_HEAD(vol_list);
78 static DEFINE_SPINLOCK(vol_list_lock);
80 static void refresh_cache_worker(struct work_struct *work);
/* Delayed work that periodically refreshes expired cache entries. */
82 static DECLARE_DELAYED_WORK(refresh_task, refresh_cache_worker);
/*
 * Validate @path (must be non-NULL, at least 3 chars, and start with a
 * separator) and return in *npath a duplicate with '/' converted to '\\'.
 * NOTE(review): interior lines are missing from this listing — TODO confirm
 * error returns and the no-copy fast path against the full source.
 */
84 static int get_normalized_path(const char *path, const char **npath)
86 	if (!path || strlen(path) < 3 || (*path != '\\' && *path != '/'))
92 		char *s = kstrdup(path, GFP_KERNEL);
95 		convert_delimiter(s, '\\');
/* Free @npath if it is a copy distinct from @path (body elided in this listing). */
101 static inline void free_normalized_path(const char *path, const char *npath)
/* Return true if the entry's expiry time (etime) is at or before now. */
107 static inline bool cache_entry_expired(const struct cache_entry *ce)
109 	struct timespec64 ts;
111 	ktime_get_coarse_real_ts64(&ts);
112 	return timespec64_compare(&ts, &ce->etime) >= 0;
/* Release every target node on the entry's target list (frees elided here). */
115 static inline void free_tgts(struct cache_entry *ce)
117 	struct cache_dfs_tgt *t, *n;
119 	list_for_each_entry_safe(t, n, &ce->tlist, list) {
/* Unhash one cache entry, drop the global count and free it back to the slab. */
126 static inline void flush_cache_ent(struct cache_entry *ce)
128 	hlist_del_init(&ce->hlist);
131 	atomic_dec(&cache_count);
132 	kmem_cache_free(cache_slab, ce);
/* Flush all hashed entries from every hash bucket (caller locking assumed —
 * presumably htable_rw_lock held for write; TODO confirm). */
135 static void flush_cache_ents(void)
139 	for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
140 		struct hlist_head *l = &cache_htable[i];
141 		struct hlist_node *n;
142 		struct cache_entry *ce;
144 		hlist_for_each_entry_safe(ce, n, l, hlist) {
145 			if (!hlist_unhashed(&ce->hlist))
152 * dfs cache /proc file
/*
 * Dump the DFS referral cache through /proc: walk every hash bucket under
 * htable_rw_lock (read) and print each entry followed by its target list.
 * NOTE(review): interior lines are elided in this listing; the only intended
 * behavior change here is the argument-order fix below.
 */
154 static int dfscache_proc_show(struct seq_file *m, void *v)
157 	struct cache_entry *ce;
158 	struct cache_dfs_tgt *t;
160 	seq_puts(m, "DFS cache\n---------\n");
162 	down_read(&htable_rw_lock);
163 	for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
164 		struct hlist_head *l = &cache_htable[i];
166 		hlist_for_each_entry(ce, l, hlist) {
167 			if (hlist_unhashed(&ce->hlist))
171 				   "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,hdr_flags=0x%x,ref_flags=0x%x,interlink=%s,path_consumed=%d,expired=%s\n",
172 				   ce->path, ce->srvtype == DFS_TYPE_ROOT ? "root" : "link",
				   /* fix: hdr_flags must precede ref_flags to match the format string
				    * (was swapped; dump_ce() already uses the correct order) */
173 				   ce->ttl, ce->etime.tv_nsec, ce->hdr_flags, ce->ref_flags,
174 				   IS_INTERLINK_SET(ce->hdr_flags) ? "yes" : "no",
175 				   ce->path_consumed, cache_entry_expired(ce) ? "yes" : "no");
177 			list_for_each_entry(t, &ce->tlist, list) {
178 				seq_printf(m, " %s%s\n",
180 					   ce->tgthint == t ? " (target hint)" : "");
184 	up_read(&htable_rw_lock);
/*
 * /proc write handler: a user-supplied character triggers clearing the whole
 * cache under htable_rw_lock (write). NOTE(review): the character check and
 * return paths are elided in this listing — confirm against full source.
 */
189 static ssize_t dfscache_proc_write(struct file *file, const char __user *buffer,
190 				   size_t count, loff_t *ppos)
195 	rc = get_user(c, buffer);
202 	cifs_dbg(FYI, "clearing dfs cache\n");
204 	down_write(&htable_rw_lock);
206 	up_write(&htable_rw_lock);
/* Open hook: bind the seq_file single-show handler for the /proc entry. */
211 static int dfscache_proc_open(struct inode *inode, struct file *file)
213 	return single_open(file, dfscache_proc_show, NULL);
/* proc_ops table wiring the DFS cache /proc file to the handlers above. */
216 const struct proc_ops dfscache_proc_ops = {
217 	.proc_open = dfscache_proc_open,
218 	.proc_read = seq_read,
219 	.proc_lseek = seq_lseek,
220 	.proc_release = single_release,
221 	.proc_write = dfscache_proc_write,
224 #ifdef CONFIG_CIFS_DEBUG2
/* Debug helper: log every target of a cache entry, marking the hint. */
225 static inline void dump_tgts(const struct cache_entry *ce)
227 	struct cache_dfs_tgt *t;
229 	cifs_dbg(FYI, "target list:\n");
230 	list_for_each_entry(t, &ce->tlist, list) {
231 		cifs_dbg(FYI, " %s%s\n", t->name,
232 			 ce->tgthint == t ? " (target hint)" : "");
/* Debug helper: log one cache entry's fields (hdr_flags before ref_flags,
 * matching the format string). */
236 static inline void dump_ce(const struct cache_entry *ce)
238 	cifs_dbg(FYI, "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,hdr_flags=0x%x,ref_flags=0x%x,interlink=%s,path_consumed=%d,expired=%s\n",
240 		 ce->srvtype == DFS_TYPE_ROOT ? "root" : "link", ce->ttl,
242 		 ce->hdr_flags, ce->ref_flags,
243 		 IS_INTERLINK_SET(ce->hdr_flags) ? "yes" : "no",
245 		 cache_entry_expired(ce) ? "yes" : "no");
/* Debug helper: log each referral returned by the server (TTL shown in both
 * seconds and minutes). Compiled out to a no-op without CONFIG_CIFS_DEBUG2. */
249 static inline void dump_refs(const struct dfs_info3_param *refs, int numrefs)
253 	cifs_dbg(FYI, "DFS referrals returned by the server:\n");
254 	for (i = 0; i < numrefs; i++) {
255 		const struct dfs_info3_param *ref = &refs[i];
260 			 "path_consumed: %d\n"
261 			 "server_type: 0x%x\n"
266 			 ref->flags, ref->path_consumed, ref->server_type,
267 			 ref->ref_flag, ref->path_name, ref->node_name,
268 			 ref->ttl, ref->ttl / 60);
274 #define dump_refs(r, n)
278 * dfs_cache_init - Initialize DFS referral cache.
280 * Return zero if initialized successfully, otherwise non-zero.
/*
 * Initialize the DFS referral cache: create the ordered workqueue and the
 * cache_entry slab, init the hash buckets, zero the entry count and load the
 * default NLS table. Returns 0 on success; error unwinding is partially
 * elided in this listing (destroy_workqueue on slab failure is visible).
 */
282 int dfs_cache_init(void)
287 	dfscache_wq = alloc_workqueue("cifs-dfscache",
288 				      WQ_FREEZABLE | WQ_MEM_RECLAIM, 1);
292 	cache_slab = kmem_cache_create("cifs_dfs_cache",
293 				       sizeof(struct cache_entry), 0,
294 				       SLAB_HWCACHE_ALIGN, NULL);
300 	for (i = 0; i < CACHE_HTABLE_SIZE; i++)
301 		INIT_HLIST_HEAD(&cache_htable[i]);
303 	atomic_set(&cache_count, 0);
304 	cache_nlsc = load_nls_default();
306 	cifs_dbg(FYI, "%s: initialized DFS referral cache\n", __func__);
310 	destroy_workqueue(dfscache_wq);
/* Hash @size bytes of @data into a bucket index; relies on
 * CACHE_HTABLE_SIZE being a power of two for the mask. */
314 static inline unsigned int cache_entry_hash(const void *data, int size)
318 	h = jhash(data, size, 0);
319 	return h & (CACHE_HTABLE_SIZE - 1);
322 /* Return target hint of a DFS cache entry */
/* Return the target-hint name, or ERR_PTR(-ENOENT) when no hint is set. */
323 static inline char *get_tgt_name(const struct cache_entry *ce)
325 	struct cache_dfs_tgt *t = ce->tgthint;
327 	return t ? t->name : ERR_PTR(-ENOENT);
330 /* Return expire time out of a new entry's TTL */
/* Convert a TTL (presumably in seconds — confirm) into an absolute expiry
 * timestamp relative to the coarse realtime clock. */
331 static inline struct timespec64 get_expire_time(int ttl)
333 	struct timespec64 ts = {
337 	struct timespec64 now;
339 	ktime_get_coarse_real_ts64(&now);
340 	return timespec64_add(now, ts);
343 /* Allocate a new DFS target */
/*
 * Allocate one target node with a duplicated @name and @path_consumed.
 * GFP_ATOMIC because callers may hold htable_rw_lock.
 * Returns ERR_PTR(-ENOMEM) on allocation failure (cleanup of t on the
 * kstrdup failure path is elided in this listing — TODO confirm no leak).
 */
344 static struct cache_dfs_tgt *alloc_target(const char *name, int path_consumed)
346 	struct cache_dfs_tgt *t;
348 	t = kmalloc(sizeof(*t), GFP_ATOMIC);
350 		return ERR_PTR(-ENOMEM);
351 	t->name = kstrdup(name, GFP_ATOMIC);
354 		return ERR_PTR(-ENOMEM);
356 	t->path_consumed = path_consumed;
357 	INIT_LIST_HEAD(&t->list);
362 * Copy DFS referral information to a cache entry and conditionally update
/*
 * Populate @ce from refs[0] (TTL, expiry, server type, flags, path consumed)
 * and build its target list from all @numrefs referrals. A target matching
 * @tgthint is placed at the head of the list; the final hint is whatever sits
 * first (list_first_entry_or_null). Error paths are elided in this listing.
 */
365 static int copy_ref_data(const struct dfs_info3_param *refs, int numrefs,
366 			 struct cache_entry *ce, const char *tgthint)
370 	ce->ttl = refs[0].ttl;
371 	ce->etime = get_expire_time(ce->ttl);
372 	ce->srvtype = refs[0].server_type;
373 	ce->hdr_flags = refs[0].flags;
374 	ce->ref_flags = refs[0].ref_flag;
375 	ce->path_consumed = refs[0].path_consumed;
377 	for (i = 0; i < numrefs; i++) {
378 		struct cache_dfs_tgt *t;
380 		t = alloc_target(refs[i].node_name, refs[i].path_consumed);
385 		if (tgthint && !strcasecmp(t->name, tgthint)) {
386 			list_add(&t->list, &ce->tlist);
389 			list_add_tail(&t->list, &ce->tlist);
394 	ce->tgthint = list_first_entry_or_null(&ce->tlist,
395 					       struct cache_dfs_tgt, list);
400 /* Allocate a new cache entry */
/*
 * Allocate a zeroed cache entry from the slab, duplicate @path into it,
 * init its list heads and copy the referral data (no target hint yet).
 * Frees the entry and returns ERR_PTR on any failure.
 */
401 static struct cache_entry *alloc_cache_entry(const char *path,
402 					     const struct dfs_info3_param *refs,
405 	struct cache_entry *ce;
408 	ce = kmem_cache_zalloc(cache_slab, GFP_KERNEL);
410 		return ERR_PTR(-ENOMEM);
412 	ce->path = kstrdup(path, GFP_KERNEL);
414 		kmem_cache_free(cache_slab, ce);
415 		return ERR_PTR(-ENOMEM);
417 	INIT_HLIST_NODE(&ce->hlist);
418 	INIT_LIST_HEAD(&ce->tlist);
420 	rc = copy_ref_data(refs, numrefs, ce, NULL);
423 		kmem_cache_free(cache_slab, ce);
429 /* Must be called with htable_rw_lock held */
/* Must be called with htable_rw_lock held */
/* Scan all buckets for the hashed entry with the earliest expiry time and
 * flush it; logs and bails if nothing removable is found. */
430 static void remove_oldest_entry(void)
433 	struct cache_entry *ce;
434 	struct cache_entry *to_del = NULL;
436 	for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
437 		struct hlist_head *l = &cache_htable[i];
439 		hlist_for_each_entry(ce, l, hlist) {
440 			if (hlist_unhashed(&ce->hlist))
442 			if (!to_del || timespec64_compare(&ce->etime,
449 		cifs_dbg(FYI, "%s: no entry to remove\n", __func__);
453 	cifs_dbg(FYI, "%s: removing entry\n", __func__);
455 	flush_cache_ent(to_del);
458 /* Add a new DFS cache entry */
/*
 * Allocate a cache entry for @path/@refs and insert it into bucket @hash.
 * Also (re)arms the refresh worker: cache_ttl tracks the minimum TTL seen
 * under cache_ttl_lock, and the delayed work is queued or rescheduled
 * accordingly. Hash insertion happens under htable_rw_lock (write).
 */
459 static int add_cache_entry(const char *path, unsigned int hash,
460 			   struct dfs_info3_param *refs, int numrefs)
462 	struct cache_entry *ce;
464 	ce = alloc_cache_entry(path, refs, numrefs);
468 	spin_lock(&cache_ttl_lock);
471 		queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
473 		cache_ttl = min_t(int, cache_ttl, ce->ttl);
474 		mod_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
476 	spin_unlock(&cache_ttl_lock);
478 	down_write(&htable_rw_lock);
479 	hlist_add_head(&ce->hlist, &cache_htable[hash]);
481 	up_write(&htable_rw_lock);
/* Case-insensitive lookup of @path in its hash bucket; yields
 * ERR_PTR(-ENOENT) when no entry matches. Caller holds htable_rw_lock. */
486 static struct cache_entry *__lookup_cache_entry(const char *path)
488 	struct cache_entry *ce;
492 	h = cache_entry_hash(path, strlen(path));
494 	hlist_for_each_entry(ce, &cache_htable[h], hlist) {
495 		if (!strcasecmp(path, ce->path)) {
503 		ce = ERR_PTR(-ENOENT);
508 * Find a DFS cache entry in hash table and optionally check prefix path against
510 * Use whole path components in the match.
511 * Must be called with htable_rw_lock held.
513 * Return ERR_PTR(-ENOENT) if the entry is not found.
/*
 * Find a cache entry for @path, optionally returning its bucket via @hash.
 * First tries the exact path; if that fails and the path has more than two
 * components, it walks backwards one component at a time (per MS-DFSC
 * 3.2.5.5) looking for an entry that is a whole-component prefix of @path.
 * Must be called with htable_rw_lock held; returns ERR_PTR(-ENOENT) if
 * nothing matches.
 * NOTE(review): the component-trimming loop is heavily elided in this
 * listing — confirm separator handling against the full source.
 */
515 static struct cache_entry *lookup_cache_entry(const char *path, unsigned int *hash)
517 	struct cache_entry *ce = ERR_PTR(-ENOENT);
524 	npath = kstrdup(path, GFP_KERNEL);
526 		return ERR_PTR(-ENOMEM);
530 	while ((s = strchr(s, sep)) && ++cnt < 3)
534 		h = cache_entry_hash(path, strlen(path));
535 		ce = __lookup_cache_entry(path);
539 	 * Handle paths that have more than two path components and are a complete prefix of the DFS
540 	 * referral request path (@path).
542 	 * See MS-DFSC 3.2.5.5 "Receiving a Root Referral Request or Link Referral Request".
544 	h = cache_entry_hash(npath, strlen(npath));
545 	e = npath + strlen(npath) - 1;
549 		/* skip separators */
550 		while (e > s && *e == sep)
558 		ce = __lookup_cache_entry(npath);
560 			h = cache_entry_hash(npath, strlen(npath));
565 		/* backward until separator */
566 		while (e > s && *e != sep)
/* Free a vol_info's owned resources; tears down its duplicated fs context. */
576 static void __vol_release(struct vol_info *vi)
580 	smb3_cleanup_fs_context_contents(&vi->ctx);
/* kref release callback: unlink the volume from vol_list under
 * vol_list_lock, then free it (free call elided in this listing). */
584 static void vol_release(struct kref *kref)
586 	struct vol_info *vi = container_of(kref, struct vol_info, refcnt);
588 	spin_lock(&vol_list_lock);
590 	spin_unlock(&vol_list_lock);
/* Detach and release every remaining volume on vol_list (used at teardown). */
594 static inline void free_vol_list(void)
596 	struct vol_info *vi, *nvi;
598 	list_for_each_entry_safe(vi, nvi, &vol_list, list) {
599 		list_del_init(&vi->list);
605 * dfs_cache_destroy - destroy DFS referral cache
/*
 * Tear down the DFS cache: stop the refresh worker, unload the NLS table,
 * destroy the slab and the workqueue. Flushing of entries/volumes is elided
 * in this listing — presumably free_vol_list()/flush_cache_ents() run here.
 */
607 void dfs_cache_destroy(void)
609 	cancel_delayed_work_sync(&refresh_task);
610 	unload_nls(cache_nlsc);
613 	kmem_cache_destroy(cache_slab);
614 	destroy_workqueue(dfscache_wq);
616 	cifs_dbg(FYI, "%s: destroyed DFS referral cache\n", __func__);
619 /* Must be called with htable_rw_lock held */
/* Must be called with htable_rw_lock held */
/* Re-populate an existing entry for @path from fresh @refs, preserving the
 * current target hint name (duplicated with GFP_ATOMIC under the lock). */
620 static int __update_cache_entry(const char *path,
621 				const struct dfs_info3_param *refs,
625 	struct cache_entry *ce;
628 	ce = lookup_cache_entry(path, NULL);
633 		s = ce->tgthint->name;
634 		th = kstrdup(s, GFP_ATOMIC);
642 	rc = copy_ref_data(refs, numrefs, ce, th);
/* Ask the server for a DFS referral of @path via the session's
 * get_dfs_refer op; requires a connected session, a server op table with
 * get_dfs_refer, and a non-NULL NLS codepage. */
649 static int get_dfs_referral(const unsigned int xid, struct cifs_ses *ses,
650 			    const struct nls_table *nls_codepage, int remap,
651 			    const char *path, struct dfs_info3_param **refs,
654 	cifs_dbg(FYI, "%s: get an DFS referral for %s\n", __func__, path);
656 	if (!ses || !ses->server || !ses->server->ops->get_dfs_refer)
658 	if (unlikely(!nls_codepage))
664 	return ses->server->ops->get_dfs_refer(xid, ses, path, refs, numrefs,
665 					       nls_codepage, remap);
668 /* Update an expired cache entry by getting a new DFS referral from server */
/* Update an expired cache entry by getting a new DFS referral from server */
/* Locked wrapper: takes htable_rw_lock for write around __update_cache_entry(). */
669 static int update_cache_entry(const char *path,
670 			      const struct dfs_info3_param *refs,
676 	down_write(&htable_rw_lock);
677 	rc = __update_cache_entry(path, refs, numrefs);
678 	up_write(&htable_rw_lock);
684 * Find, create or update a DFS cache entry.
686 * If the entry wasn't found, it will create a new one. Or if it was found but
687 * expired, then it will update the entry accordingly.
689 * For interlinks, __cifs_dfs_mount() and expand_dfs_referral() are supposed to
690 * handle them properly.
/*
 * Find, create or update the cache entry for @path.
 * - With @noreq set: pure lookup, no server traffic; returns the lookup
 *   status (PTR_ERR_OR_ZERO of the entry).
 * - Otherwise: if a fresh (non-expired) entry exists, done; else request a
 *   new referral and either update the existing entry or add a new one,
 *   evicting the oldest entry first when the cache is full.
 * The referral array is always freed before returning.
 */
692 static int __dfs_cache_find(const unsigned int xid, struct cifs_ses *ses,
693 			    const struct nls_table *nls_codepage, int remap,
694 			    const char *path, bool noreq)
698 	struct cache_entry *ce;
699 	struct dfs_info3_param *refs = NULL;
703 	cifs_dbg(FYI, "%s: search path: %s\n", __func__, path);
705 	down_read(&htable_rw_lock);
707 	ce = lookup_cache_entry(path, &hash);
710 	 * If @noreq is set, no requests will be sent to the server. Just return
714 		up_read(&htable_rw_lock);
715 		return PTR_ERR_OR_ZERO(ce);
719 		if (!cache_entry_expired(ce)) {
721 			up_read(&htable_rw_lock);
728 	up_read(&htable_rw_lock);
731 	 * No entry was found.
733 	 * Request a new DFS referral in order to create a new cache entry, or
734 	 * updating an existing one.
736 	rc = get_dfs_referral(xid, ses, nls_codepage, remap, path,
741 	dump_refs(refs, numrefs);
744 		rc = update_cache_entry(path, refs, numrefs);
748 	if (atomic_read(&cache_count) >= CACHE_MAX_ENTRIES) {
749 		cifs_dbg(FYI, "%s: reached max cache size (%d)\n",
750 			 __func__, CACHE_MAX_ENTRIES);
751 		down_write(&htable_rw_lock);
752 		remove_oldest_entry();
753 		up_write(&htable_rw_lock);
756 	rc = add_cache_entry(path, hash, refs, numrefs);
758 		atomic_inc(&cache_count);
761 	free_dfs_info_array(refs, numrefs);
766 * Set up a DFS referral from a given cache entry.
768 * Must be called with htable_rw_lock held.
/*
 * Fill @ref from cache entry @ce with @path as path_name and @target as
 * node_name (both duplicated with GFP_ATOMIC since htable_rw_lock is held).
 * On node_name allocation failure, path_name is freed and NULLed.
 */
770 static int setup_referral(const char *path, struct cache_entry *ce,
771 			  struct dfs_info3_param *ref, const char *target)
775 	cifs_dbg(FYI, "%s: set up new ref\n", __func__);
777 	memset(ref, 0, sizeof(*ref));
779 	ref->path_name = kstrdup(path, GFP_ATOMIC);
783 	ref->node_name = kstrdup(target, GFP_ATOMIC);
784 	if (!ref->node_name) {
789 	ref->path_consumed = ce->path_consumed;
791 	ref->server_type = ce->srvtype;
792 	ref->ref_flag = ce->ref_flags;
793 	ref->flags = ce->hdr_flags;
798 	kfree(ref->path_name);
799 	ref->path_name = NULL;
803 /* Return target list of a DFS cache entry */
/*
 * Copy the entry's target list into caller-owned @tl: one iterator per
 * target (name duplicated with GFP_ATOMIC), target hint placed first.
 * The error path frees any iterators already built.
 */
804 static int get_targets(struct cache_entry *ce, struct dfs_cache_tgt_list *tl)
807 	struct list_head *head = &tl->tl_list;
808 	struct cache_dfs_tgt *t;
809 	struct dfs_cache_tgt_iterator *it, *nit;
811 	memset(tl, 0, sizeof(*tl));
812 	INIT_LIST_HEAD(head);
814 	list_for_each_entry(t, &ce->tlist, list) {
815 		it = kzalloc(sizeof(*it), GFP_ATOMIC);
821 		it->it_name = kstrdup(t->name, GFP_ATOMIC);
827 		it->it_path_consumed = t->path_consumed;
829 		if (ce->tgthint == t)
830 			list_add(&it->it_list, head);
832 			list_add_tail(&it->it_list, head);
835 	tl->tl_numtgts = ce->numtgts;
840 	list_for_each_entry_safe(it, nit, head, it_list) {
848 * dfs_cache_find - find a DFS cache entry
850 * If it doesn't find the cache entry, then it will get a DFS referral
851 * for @path and create a new entry.
853 * In case the cache entry exists but expired, it will get a DFS referral
854 * for @path and then update the respective cache entry.
856 * These parameters are passed down to the get_dfs_refer() call if it
857 * needs to be issued:
859 * @ses: smb session to issue the request on
860 * @nls_codepage: charset conversion
861 * @remap: path character remapping type
862 * @path: path to lookup in DFS referral cache.
864 * @ref: when non-NULL, store single DFS referral result in it.
865 * @tgt_list: when non-NULL, store complete DFS target list in it.
867 * Return zero if the target was found, otherwise non-zero.
/*
 * Public lookup: normalize @path, find/create/update the entry via
 * __dfs_cache_find() (requests allowed), then under the read lock copy out
 * a single referral (@ref) and/or the full target list (@tgt_list).
 */
869 int dfs_cache_find(const unsigned int xid, struct cifs_ses *ses,
870 		   const struct nls_table *nls_codepage, int remap,
871 		   const char *path, struct dfs_info3_param *ref,
872 		   struct dfs_cache_tgt_list *tgt_list)
876 	struct cache_entry *ce;
878 	rc = get_normalized_path(path, &npath);
882 	rc = __dfs_cache_find(xid, ses, nls_codepage, remap, npath, false);
886 	down_read(&htable_rw_lock);
888 	ce = lookup_cache_entry(npath, NULL);
890 		up_read(&htable_rw_lock);
896 		rc = setup_referral(path, ce, ref, get_tgt_name(ce));
900 		rc = get_targets(ce, tgt_list);
902 	up_read(&htable_rw_lock);
905 	free_normalized_path(path, npath);
910 * dfs_cache_noreq_find - find a DFS cache entry without sending any requests to
911 * the currently connected server.
913 * NOTE: This function will neither update a cache entry in case it was
914 * expired, nor create a new cache entry if @path hasn't been found. It heavily
915 * relies on an existing cache entry.
917 * @path: path to lookup in the DFS referral cache.
918 * @ref: when non-NULL, store single DFS referral result in it.
919 * @tgt_list: when non-NULL, store complete DFS target list in it.
921 * Return 0 if successful.
922 * Return -ENOENT if the entry was not found.
923 * Return non-zero for other errors.
/*
 * Lookup-only variant of dfs_cache_find(): never contacts the server and
 * never creates or refreshes entries; relies on an existing cache entry.
 * Returns -ENOENT when the entry is absent.
 */
925 int dfs_cache_noreq_find(const char *path, struct dfs_info3_param *ref,
926 			 struct dfs_cache_tgt_list *tgt_list)
930 	struct cache_entry *ce;
932 	rc = get_normalized_path(path, &npath);
936 	cifs_dbg(FYI, "%s: path: %s\n", __func__, npath);
938 	down_read(&htable_rw_lock);
940 	ce = lookup_cache_entry(npath, NULL);
947 		rc = setup_referral(path, ce, ref, get_tgt_name(ce));
951 		rc = get_targets(ce, tgt_list);
954 	up_read(&htable_rw_lock);
955 	free_normalized_path(path, npath);
961 * dfs_cache_update_tgthint - update target hint of a DFS cache entry
963 * If it doesn't find the cache entry, then it will get a DFS referral for @path
964 * and create a new entry.
966 * In case the cache entry exists but expired, it will get a DFS referral
967 * for @path and then update the respective cache entry.
971 * @nls_codepage: charset conversion
972 * @remap: type of character remapping for paths
973 * @path: path to lookup in DFS referral cache.
974 * @it: DFS target iterator
976 * Return zero if the target hint was updated successfully, otherwise non-zero.
/*
 * Update the entry's target hint to the target named by iterator @it,
 * creating/refreshing the entry first via __dfs_cache_find() (requests
 * allowed). The fast path (hint already matches) and the list-head move are
 * partially elided in this listing. Runs under htable_rw_lock (write).
 */
978 int dfs_cache_update_tgthint(const unsigned int xid, struct cifs_ses *ses,
979 			     const struct nls_table *nls_codepage, int remap,
981 			     const struct dfs_cache_tgt_iterator *it)
985 	struct cache_entry *ce;
986 	struct cache_dfs_tgt *t;
988 	rc = get_normalized_path(path, &npath);
992 	cifs_dbg(FYI, "%s: update target hint - path: %s\n", __func__, npath);
994 	rc = __dfs_cache_find(xid, ses, nls_codepage, remap, npath, false);
998 	down_write(&htable_rw_lock);
1000 	ce = lookup_cache_entry(npath, NULL);
1008 	if (likely(!strcasecmp(it->it_name, t->name)))
1011 	list_for_each_entry(t, &ce->tlist, list) {
1012 		if (!strcasecmp(t->name, it->it_name)) {
1014 			cifs_dbg(FYI, "%s: new target hint: %s\n", __func__,
1021 	up_write(&htable_rw_lock);
1023 	free_normalized_path(path, npath);
1029 * dfs_cache_noreq_update_tgthint - update target hint of a DFS cache entry
1030 * without sending any requests to the currently connected server.
1032 * NOTE: This function will neither update a cache entry in case it was
1033 * expired, nor create a new cache entry if @path hasn't been found. It heavily
1034 * relies on an existing cache entry.
1036 * @path: path to lookup in DFS referral cache.
1037 * @it: target iterator which contains the target hint to update the cache
1040 * Return zero if the target hint was updated successfully, otherwise non-zero.
/*
 * No-request variant of dfs_cache_update_tgthint(): updates the hint only
 * if the entry already exists; never contacts the server and never creates
 * or refreshes entries. Runs under htable_rw_lock (write).
 */
1042 int dfs_cache_noreq_update_tgthint(const char *path,
1043 				   const struct dfs_cache_tgt_iterator *it)
1047 	struct cache_entry *ce;
1048 	struct cache_dfs_tgt *t;
1053 	rc = get_normalized_path(path, &npath);
1057 	cifs_dbg(FYI, "%s: path: %s\n", __func__, npath);
1059 	down_write(&htable_rw_lock);
1061 	ce = lookup_cache_entry(npath, NULL);
1070 	if (unlikely(!strcasecmp(it->it_name, t->name)))
1073 	list_for_each_entry(t, &ce->tlist, list) {
1074 		if (!strcasecmp(t->name, it->it_name)) {
1076 			cifs_dbg(FYI, "%s: new target hint: %s\n", __func__,
1083 	up_write(&htable_rw_lock);
1084 	free_normalized_path(path, npath);
1090 * dfs_cache_get_tgt_referral - returns a DFS referral (@ref) from a given
1091 * target iterator (@it).
1093 * @path: path to lookup in DFS referral cache.
1094 * @it: DFS target iterator.
1095 * @ref: DFS referral pointer to set up the gathered information.
1097 * Return zero if the DFS referral was set up correctly, otherwise non-zero.
/*
 * Build a dfs_info3_param (@ref) for @path using the target named by
 * iterator @it. Lookup-only: requires an existing cache entry; copies out
 * under htable_rw_lock (read).
 */
1099 int dfs_cache_get_tgt_referral(const char *path,
1100 			       const struct dfs_cache_tgt_iterator *it,
1101 			       struct dfs_info3_param *ref)
1105 	struct cache_entry *ce;
1110 	rc = get_normalized_path(path, &npath);
1114 	cifs_dbg(FYI, "%s: path: %s\n", __func__, npath);
1116 	down_read(&htable_rw_lock);
1118 	ce = lookup_cache_entry(npath, NULL);
1124 	cifs_dbg(FYI, "%s: target name: %s\n", __func__, it->it_name);
1126 	rc = setup_referral(path, ce, ref, it->it_name);
1129 	up_read(&htable_rw_lock);
1130 	free_normalized_path(path, npath);
1136 * dfs_cache_add_vol - add a cifs context during mount() that will be handled by
1137 * DFS cache refresh worker.
1139 * @mntdata: mount data.
1140 * @ctx: cifs context.
1141 * @fullpath: origin full path.
1143 * Return zero if context was set up correctly, otherwise non-zero.
/*
 * Register a mounted volume with the refresh worker: duplicate @fullpath
 * and @ctx into a refcounted vol_info (taking ownership of @mntdata) and
 * append it to vol_list. Error labels (err_free_fullpath etc.) unwind the
 * partial allocations; some cleanup lines are elided in this listing.
 */
1145 int dfs_cache_add_vol(char *mntdata, struct smb3_fs_context *ctx, const char *fullpath)
1148 	struct vol_info *vi;
1150 	if (!ctx || !fullpath || !mntdata)
1153 	cifs_dbg(FYI, "%s: fullpath: %s\n", __func__, fullpath);
1155 	vi = kzalloc(sizeof(*vi), GFP_KERNEL);
1159 	vi->fullpath = kstrdup(fullpath, GFP_KERNEL);
1160 	if (!vi->fullpath) {
1165 	rc = smb3_fs_context_dup(&vi->ctx, ctx);
1167 		goto err_free_fullpath;
1169 	vi->mntdata = mntdata;
1170 	spin_lock_init(&vi->ctx_lock);
1171 	kref_init(&vi->refcnt);
1173 	spin_lock(&vol_list_lock);
1174 	list_add_tail(&vi->list, &vol_list);
1175 	spin_unlock(&vol_list_lock);
1180 	kfree(vi->fullpath);
1186 /* Must be called with vol_list_lock held */
/* Must be called with vol_list_lock held */
/* Case-insensitive search of vol_list by full path; ERR_PTR(-ENOENT) if absent. */
1187 static struct vol_info *find_vol(const char *fullpath)
1189 	struct vol_info *vi;
1191 	list_for_each_entry(vi, &vol_list, list) {
1192 		cifs_dbg(FYI, "%s: vi->fullpath: %s\n", __func__, vi->fullpath);
1193 		if (!strcasecmp(vi->fullpath, fullpath))
1196 	return ERR_PTR(-ENOENT);
1200 * dfs_cache_update_vol - update vol info in DFS cache after failover
1202 * @fullpath: fullpath to look up in volume list.
1203 * @server: TCP ses pointer.
1205 * Return zero if volume was updated, otherwise non-zero.
/*
 * After failover, copy the server's current destination address into the
 * registered volume's fs context. Takes a temporary kref on the vol_info so
 * it survives after vol_list_lock is dropped; the address copy itself is
 * serialized by vi->ctx_lock.
 */
1207 int dfs_cache_update_vol(const char *fullpath, struct TCP_Server_Info *server)
1209 	struct vol_info *vi;
1211 	if (!fullpath || !server)
1214 	cifs_dbg(FYI, "%s: fullpath: %s\n", __func__, fullpath);
1216 	spin_lock(&vol_list_lock);
1217 	vi = find_vol(fullpath);
1219 		spin_unlock(&vol_list_lock);
1222 	kref_get(&vi->refcnt);
1223 	spin_unlock(&vol_list_lock);
1225 	cifs_dbg(FYI, "%s: updating volume info\n", __func__);
1226 	spin_lock(&vi->ctx_lock);
1227 	memcpy(&vi->ctx.dstaddr, &server->dstaddr,
1228 	       sizeof(vi->ctx.dstaddr));
1229 	spin_unlock(&vi->ctx_lock);
1231 	kref_put(&vi->refcnt, vol_release);
1237 * dfs_cache_del_vol - remove volume info in DFS cache during umount()
1239 * @fullpath: fullpath to look up in volume list.
/*
 * Unregister a volume at umount: look it up by full path and drop the
 * list's reference (vol_release unlinks and frees on last put).
 * NOTE(review): error handling for a failed find_vol() is elided in this
 * listing — confirm the IS_ERR check against the full source.
 */
1241 void dfs_cache_del_vol(const char *fullpath)
1243 	struct vol_info *vi;
1245 	if (!fullpath || !*fullpath)
1248 	cifs_dbg(FYI, "%s: fullpath: %s\n", __func__, fullpath);
1250 	spin_lock(&vol_list_lock);
1251 	vi = find_vol(fullpath);
1252 	spin_unlock(&vol_list_lock);
1255 	kref_put(&vi->refcnt, vol_release);
1259 * dfs_cache_get_tgt_share - parse a DFS target
1261 * @path: DFS full path
1262 * @it: DFS target iterator.
1263 * @share: tree name.
1264 * @prefix: prefix path.
1266 * Return zero if target was parsed correctly, otherwise non-zero.
/*
 * Split a DFS target into *share ("\\server\\share", allocated) and *prefix
 * (the merged remainder of the target node path and the part of @path past
 * it_path_consumed, allocated; empty-prefix case copies just the DFS-path
 * remainder). The separator character is taken from the target's first char.
 * Several guard/cleanup lines are elided in this listing.
 */
1268 int dfs_cache_get_tgt_share(char *path, const struct dfs_cache_tgt_iterator *it,
1269 			    char **share, char **prefix)
1273 	size_t plen1, plen2;
1275 	if (!it || !path || !share || !prefix || strlen(path) < it->it_path_consumed)
1281 	sep = it->it_name[0];
1282 	if (sep != '\\' && sep != '/')
1285 	s = strchr(it->it_name + 1, sep);
1289 	/* point to prefix in target node */
1290 	s = strchrnul(s + 1, sep);
1292 	/* extract target share */
1293 	*share = kstrndup(it->it_name, s - it->it_name, GFP_KERNEL);
1297 	/* skip separator */
1300 	/* point to prefix in DFS path */
1301 	p = path + it->it_path_consumed;
1305 	/* merge prefix paths from DFS path and target node */
1306 	plen1 = it->it_name + strlen(it->it_name) - s;
1307 	plen2 = path + strlen(path) - p;
1308 	if (plen1 || plen2) {
1309 		len = plen1 + plen2 + 2;
1310 		*prefix = kmalloc(len, GFP_KERNEL);
1317 		scnprintf(*prefix, len, "%.*s%c%.*s", (int)plen1, s, sep, (int)plen2, p);
1319 		strscpy(*prefix, p, len);
1324 /* Get all tcons that are within a DFS namespace and can be refreshed */
/* Get all tcons that are within a DFS namespace and can be refreshed */
/* Collects refreshable tcons (and each session's IPC tcon with a dfs_path)
 * onto @head via their ulist links, under cifs_tcp_ses_lock. Reference
 * taking on each tcon is elided in this listing — TODO confirm. */
1325 static void get_tcons(struct TCP_Server_Info *server, struct list_head *head)
1327 	struct cifs_ses *ses;
1328 	struct cifs_tcon *tcon;
1330 	INIT_LIST_HEAD(head);
1332 	spin_lock(&cifs_tcp_ses_lock);
1333 	list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
1334 		list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
1335 			if (!tcon->need_reconnect && !tcon->need_reopen_files &&
1338 				list_add_tail(&tcon->ulist, head);
1341 		if (ses->tcon_ipc && !ses->tcon_ipc->need_reconnect &&
1342 		    ses->tcon_ipc->dfs_path) {
1343 			list_add_tail(&ses->tcon_ipc->ulist, head);
1346 	spin_unlock(&cifs_tcp_ses_lock);
/*
 * is_dfs_link - check whether a DFS path refers to a DFS link rather than a
 * DFS root, i.e. it has more than two backslash-separated components
 * ("\server\share\..." is a link, "\server\share" is a root).
 *
 * @path: DFS path beginning with a separator.
 *
 * Return true if a second and a third '\\' separator exist in @path.
 *
 * Fix: guard against strchr() returning NULL before dereferencing s + 1,
 * which would otherwise crash on a path with no second component.
 */
static bool is_dfs_link(const char *path)
{
	const char *s;

	s = strchr(path + 1, '\\');
	if (!s)
		return false;
	return !!strchr(s + 1, '\\');
}
/*
 * Return a newly-allocated copy of the DFS root portion of @path, i.e. the
 * first two components ("\\server\\share"); ERR_PTR(-EINVAL) if @path has
 * fewer than three components, ERR_PTR(-ENOMEM) on allocation failure.
 */
1359 static char *get_dfs_root(const char *path)
1363 	s = strchr(path + 1, '\\');
1365 		return ERR_PTR(-EINVAL);
1367 	s = strchr(s + 1, '\\');
1369 		return ERR_PTR(-EINVAL);
1371 	npath = kstrndup(path, s - path, GFP_KERNEL);
1373 		return ERR_PTR(-ENOMEM);
/* Drop a TCP server reference obtained via get_tcp_server(). */
1378 static inline void put_tcp_server(struct TCP_Server_Info *server)
1380 	cifs_put_tcp_session(server, 0);
/* Look up the TCP session matching @ctx and return it only if its status is
 * CifsGood (checked under GlobalMid_Lock); otherwise drop it and return
 * NULL (return lines elided in this listing — confirm). */
1383 static struct TCP_Server_Info *get_tcp_server(struct smb3_fs_context *ctx)
1385 	struct TCP_Server_Info *server;
1387 	server = cifs_find_tcp_session(ctx);
1388 	if (IS_ERR_OR_NULL(server))
1391 	spin_lock(&GlobalMid_Lock);
1392 	if (server->tcpStatus != CifsGood) {
1393 		spin_unlock(&GlobalMid_Lock);
1394 		put_tcp_server(server);
1397 	spin_unlock(&GlobalMid_Lock);
1402 /* Find root SMB session out of a DFS link path */
/* Find root SMB session out of a DFS link path */
/*
 * Resolve the DFS root of @path, build a referral for it from the cache,
 * compose mount options against the volume's saved mntdata, set up a
 * temporary fs context and obtain (or create) the SMB session for the
 * root's TCP server. Returns the session or an ERR_PTR; the temporary
 * context is cleaned up before returning. Several cleanup/goto lines are
 * elided in this listing — confirm unwind order against full source.
 */
1403 static struct cifs_ses *find_root_ses(struct vol_info *vi,
1404 				      struct cifs_tcon *tcon,
1409 	struct cache_entry *ce;
1410 	struct dfs_info3_param ref = {0};
1411 	char *mdata = NULL, *devname = NULL;
1412 	struct TCP_Server_Info *server;
1413 	struct cifs_ses *ses;
1414 	struct smb3_fs_context ctx = {NULL};
1416 	rpath = get_dfs_root(path);
1418 		return ERR_CAST(rpath);
1420 	down_read(&htable_rw_lock);
1422 	ce = lookup_cache_entry(rpath, NULL);
1424 		up_read(&htable_rw_lock);
1429 	rc = setup_referral(path, ce, &ref, get_tgt_name(ce));
1431 		up_read(&htable_rw_lock);
1436 	up_read(&htable_rw_lock);
1438 	mdata = cifs_compose_mount_options(vi->mntdata, rpath, &ref,
1440 	free_dfs_info_param(&ref);
1442 	if (IS_ERR(mdata)) {
1443 		ses = ERR_CAST(mdata);
1448 	rc = cifs_setup_volume_info(&ctx, NULL, devname);
1455 	server = get_tcp_server(&ctx);
1457 		ses = ERR_PTR(-EHOSTDOWN);
1461 	ses = cifs_get_smb_ses(server, &ctx);
1464 	smb3_cleanup_fs_context_contents(&ctx);
1472 /* Refresh DFS cache entry from a given tcon */
/* Refresh DFS cache entry from a given tcon */
/*
 * Refresh the cache entry for @tcon's dfs_path if it has expired: for DFS
 * links, a root SMB session is resolved via find_root_ses() and used for
 * the referral request; the entry is then updated and the referral array
 * freed. The xid acquisition and some error paths are elided here.
 */
1473 static int refresh_tcon(struct vol_info *vi, struct cifs_tcon *tcon)
1477 	const char *path, *npath;
1478 	struct cache_entry *ce;
1479 	struct cifs_ses *root_ses = NULL, *ses;
1480 	struct dfs_info3_param *refs = NULL;
1485 	path = tcon->dfs_path + 1;
1487 	rc = get_normalized_path(path, &npath);
1491 	down_read(&htable_rw_lock);
1493 	ce = lookup_cache_entry(npath, NULL);
1496 		up_read(&htable_rw_lock);
1500 	if (!cache_entry_expired(ce)) {
1501 		up_read(&htable_rw_lock);
1505 	up_read(&htable_rw_lock);
1507 	/* If it's a DFS Link, then use root SMB session for refreshing it */
1508 	if (is_dfs_link(npath)) {
1509 		ses = root_ses = find_root_ses(vi, tcon, npath);
1519 	rc = get_dfs_referral(xid, ses, cache_nlsc, tcon->remap, npath, &refs,
1522 		dump_refs(refs, numrefs);
1523 		rc = update_cache_entry(npath, refs, numrefs);
1524 		free_dfs_info_array(refs, numrefs);
1528 		cifs_put_smb_ses(root_ses);
1531 	free_normalized_path(path, npath);
1539 * Worker that will refresh DFS cache based on lowest TTL value from a DFS
/*
 * Periodic worker: snapshot the eligible volumes (server tcpStatus ==
 * CifsGood) onto a private list via their rlist links with an extra kref,
 * then for each volume walk its refreshable tcons and refresh any expired
 * cache entries, dropping tcon/server/volume references as it goes.
 * Finally re-arms itself using the current cache_ttl under cache_ttl_lock.
 */
1542 static void refresh_cache_worker(struct work_struct *work)
1544 	struct vol_info *vi, *nvi;
1545 	struct TCP_Server_Info *server;
1548 	struct cifs_tcon *tcon, *ntcon;
1552 	 * Find SMB volumes that are eligible (server->tcpStatus == CifsGood)
1555 	spin_lock(&vol_list_lock);
1556 	list_for_each_entry(vi, &vol_list, list) {
1557 		server = get_tcp_server(&vi->ctx);
1561 		kref_get(&vi->refcnt);
1562 		list_add_tail(&vi->rlist, &vols);
1563 		put_tcp_server(server);
1565 	spin_unlock(&vol_list_lock);
1567 	/* Walk through all TCONs and refresh any expired cache entry */
1568 	list_for_each_entry_safe(vi, nvi, &vols, rlist) {
1569 		spin_lock(&vi->ctx_lock);
1570 		server = get_tcp_server(&vi->ctx);
1571 		spin_unlock(&vi->ctx_lock);
1576 		get_tcons(server, &tcons);
1579 		list_for_each_entry_safe(tcon, ntcon, &tcons, ulist) {
1581 			 * Skip tcp server if any of its tcons failed to refresh
1582 			 * (possibily due to reconnects).
1585 				rc = refresh_tcon(vi, tcon);
1587 			list_del_init(&tcon->ulist);
1588 			cifs_put_tcon(tcon);
1591 		put_tcp_server(server);
1594 		list_del_init(&vi->rlist);
1595 		kref_put(&vi->refcnt, vol_release);
1598 	spin_lock(&cache_ttl_lock);
1599 	queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
1600 	spin_unlock(&cache_ttl_lock);