// SPDX-License-Identifier: GPL-2.0
/*
 * DFS referral cache routines
 *
 * Copyright (c) 2018-2019 Paulo Alcantara <palcantara@suse.de>
 */
8 #include <linux/jhash.h>
9 #include <linux/ktime.h>
10 #include <linux/slab.h>
11 #include <linux/proc_fs.h>
12 #include <linux/nls.h>
13 #include <linux/workqueue.h>
16 #include "smb2proto.h"
17 #include "cifsproto.h"
18 #include "cifs_debug.h"
19 #include "cifs_unicode.h"
21 #include "fs_context.h"
23 #include "dfs_cache.h"
/* DFS referral cache geometry: 32 hash buckets, at most 64 entries total. */
25 #define CACHE_HTABLE_SIZE 32
26 #define CACHE_MAX_ENTRIES 64
/* Referral header flags used to report "interlink" in the /proc dump. */
28 #define IS_INTERLINK_SET(v) ((v) & (DFSREF_REFERRAL_SERVER | \
29 DFSREF_STORAGE_SERVER))
/*
 * Cache data structures.
 * NOTE(review): struct bodies are partially elided in this view -- the
 * fields below span cache_dfs_tgt, cache_entry and vol_info; confirm
 * boundaries against the full source.
 */
31 struct cache_dfs_tgt {
34 struct list_head list;
38 struct hlist_node hlist;
40 int hdr_flags; /* RESP_GET_DFS_REFERRAL.ReferralHeaderFlags */
41 int ttl; /* DFS_REREFERRAL_V3.TimeToLive */
42 int srvtype; /* DFS_REREFERRAL_V3.ServerType */
43 int ref_flags; /* DFS_REREFERRAL_V3.ReferralEntryFlags */
/* Absolute expiration time derived from ttl (see get_expire_time()). */
44 struct timespec64 etime;
45 int path_consumed; /* RESP_GET_DFS_REFERRAL.PathConsumed */
/* List of cache_dfs_tgt targets; tgthint points at the preferred one. */
47 struct list_head tlist;
48 struct cache_dfs_tgt *tgthint;
/* vol_info fields: duplicated mount context plus list linkage. */
54 struct smb3_fs_context ctx;
56 struct list_head list;
57 struct list_head rlist;
/* Slab for cache_entry objects and the dedicated cache workqueue. */
61 static struct kmem_cache *cache_slab __read_mostly;
62 static struct workqueue_struct *dfscache_wq __read_mostly;
/* Protects the lowest-TTL value used to schedule refresh_task. */
65 static DEFINE_SPINLOCK(cache_ttl_lock);
/* Default NLS table, used by the refresh worker (loaded in dfs_cache_init). */
67 static struct nls_table *cache_nlsc;
70 * Number of entries in the cache
72 static atomic_t cache_count;
/* Hash table of cache entries; htable_rw_lock guards all buckets. */
74 static struct hlist_head cache_htable[CACHE_HTABLE_SIZE];
75 static DECLARE_RWSEM(htable_rw_lock);
/* Mounted volumes tracked for refresh; guarded by vol_list_lock. */
77 static LIST_HEAD(vol_list);
78 static DEFINE_SPINLOCK(vol_list_lock);
/* Delayed work that periodically refreshes expired cache entries. */
80 static void refresh_cache_worker(struct work_struct *work);
82 static DECLARE_DELAYED_WORK(refresh_task, refresh_cache_worker);
/*
 * Validate @path and produce a '\\'-delimited copy in @*npath.
 * Rejects paths shorter than 3 chars or not starting with a separator.
 * NOTE(review): several body lines are elided in this view -- the
 * kstrdup() failure handling is not visible; confirm against full source.
 */
84 static int get_normalized_path(const char *path, const char **npath)
86 if (!path || strlen(path) < 3 || (*path != '\\' && *path != '/'))
92 char *s = kstrdup(path, GFP_KERNEL);
95 convert_delimiter(s, '\\');
/* Presumably frees the copy made above when @npath != @path -- body elided. */
101 static inline void free_normalized_path(const char *path, const char *npath)
/* Return true if @ce's expiration time has passed (coarse realtime clock). */
107 static inline bool cache_entry_expired(const struct cache_entry *ce)
109 struct timespec64 ts;
111 ktime_get_coarse_real_ts64(&ts);
112 return timespec64_compare(&ts, &ce->etime) >= 0;
/* Free every target on @ce->tlist. */
115 static inline void free_tgts(struct cache_entry *ce)
117 struct cache_dfs_tgt *t, *n;
119 list_for_each_entry_safe(t, n, &ce->tlist, list) {
/*
 * Unhash @ce, drop the global entry count and release it to the slab.
 * Callers hold htable_rw_lock for writing (see dfscache_proc_write,
 * remove_oldest_entry).
 */
126 static inline void flush_cache_ent(struct cache_entry *ce)
128 hlist_del_init(&ce->hlist);
131 atomic_dec(&cache_count);
132 kmem_cache_free(cache_slab, ce);
/* Flush every hashed entry in every bucket of the cache. */
135 static void flush_cache_ents(void)
139 for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
140 struct hlist_head *l = &cache_htable[i];
141 struct hlist_node *n;
142 struct cache_entry *ce;
144 hlist_for_each_entry_safe(ce, n, l, hlist) {
145 if (!hlist_unhashed(&ce->hlist))
152 * dfs cache /proc file
154 static int dfscache_proc_show(struct seq_file *m, void *v)
157 struct cache_entry *ce;
158 struct cache_dfs_tgt *t;
160 seq_puts(m, "DFS cache\n---------\n");
162 down_read(&htable_rw_lock);
163 for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
164 struct hlist_head *l = &cache_htable[i];
166 hlist_for_each_entry(ce, l, hlist) {
167 if (hlist_unhashed(&ce->hlist))
171 "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,hdr_flags=0x%x,ref_flags=0x%x,interlink=%s,path_consumed=%d,expired=%s\n",
172 ce->path, ce->srvtype == DFS_TYPE_ROOT ? "root" : "link",
173 ce->ttl, ce->etime.tv_nsec, ce->ref_flags, ce->hdr_flags,
174 IS_INTERLINK_SET(ce->hdr_flags) ? "yes" : "no",
175 ce->path_consumed, cache_entry_expired(ce) ? "yes" : "no");
177 list_for_each_entry(t, &ce->tlist, list) {
178 seq_printf(m, " %s%s\n",
180 ce->tgthint == t ? " (target hint)" : "");
184 up_read(&htable_rw_lock);
/*
 * Writing to /proc/fs/cifs/dfscache clears the whole referral cache.
 * NOTE(review): validation of the byte read from userspace is elided in
 * this view.
 */
189 static ssize_t dfscache_proc_write(struct file *file, const char __user *buffer,
190 size_t count, loff_t *ppos)
195 rc = get_user(c, buffer);
202 cifs_dbg(FYI, "clearing dfs cache\n");
/* Exclusive table lock while the cache is flushed. */
204 down_write(&htable_rw_lock);
206 up_write(&htable_rw_lock);
/* seq_file open hook for /proc/fs/cifs/dfscache. */
211 static int dfscache_proc_open(struct inode *inode, struct file *file)
213 return single_open(file, dfscache_proc_show, NULL);
/* /proc interface: read dumps the cache, write clears it. */
216 const struct proc_ops dfscache_proc_ops = {
217 .proc_open = dfscache_proc_open,
218 .proc_read = seq_read,
219 .proc_lseek = seq_lseek,
220 .proc_release = single_release,
221 .proc_write = dfscache_proc_write,
224 #ifdef CONFIG_CIFS_DEBUG2
/* Log @ce's target list at FYI level, marking the current hint. */
225 static inline void dump_tgts(const struct cache_entry *ce)
227 struct cache_dfs_tgt *t;
229 cifs_dbg(FYI, "target list:\n");
230 list_for_each_entry(t, &ce->tlist, list) {
231 cifs_dbg(FYI, " %s%s\n", t->name,
232 ce->tgthint == t ? " (target hint)" : "");
/* Log one cache entry at FYI level. */
236 static inline void dump_ce(const struct cache_entry *ce)
238 cifs_dbg(FYI, "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,hdr_flags=0x%x,ref_flags=0x%x,interlink=%s,path_consumed=%d,expired=%s\n",
240 ce->srvtype == DFS_TYPE_ROOT ? "root" : "link", ce->ttl,
242 ce->hdr_flags, ce->ref_flags,
243 IS_INTERLINK_SET(ce->hdr_flags) ? "yes" : "no",
245 cache_entry_expired(ce) ? "yes" : "no");
/* Log the raw referral array returned by the server (ttl in sec and min). */
249 static inline void dump_refs(const struct dfs_info3_param *refs, int numrefs)
253 cifs_dbg(FYI, "DFS referrals returned by the server:\n");
254 for (i = 0; i < numrefs; i++) {
255 const struct dfs_info3_param *ref = &refs[i];
260 "path_consumed: %d\n"
261 "server_type: 0x%x\n"
266 ref->flags, ref->path_consumed, ref->server_type,
267 ref->ref_flag, ref->path_name, ref->node_name,
268 ref->ttl, ref->ttl / 60);
/* No-op stub when CONFIG_CIFS_DEBUG2 is disabled. */
274 #define dump_refs(r, n)
278 * dfs_cache_init - Initialize DFS referral cache.
280 * Return zero if initialized successfully, otherwise non-zero.
282 int dfs_cache_init(void)
/* Single-threaded, freezable workqueue for the periodic refresh work. */
287 dfscache_wq = alloc_workqueue("cifs-dfscache",
288 WQ_FREEZABLE | WQ_MEM_RECLAIM, 1);
292 cache_slab = kmem_cache_create("cifs_dfs_cache",
293 sizeof(struct cache_entry), 0,
294 SLAB_HWCACHE_ALIGN, NULL);
300 for (i = 0; i < CACHE_HTABLE_SIZE; i++)
301 INIT_HLIST_HEAD(&cache_htable[i]);
303 atomic_set(&cache_count, 0);
304 cache_nlsc = load_nls_default();
306 cifs_dbg(FYI, "%s: initialized DFS referral cache\n", __func__);
/* Error path: undo the workqueue allocation (labels elided in this view). */
310 destroy_workqueue(dfscache_wq);
/* jhash @data into a bucket index; relies on CACHE_HTABLE_SIZE being 2^n. */
314 static inline unsigned int cache_entry_hash(const void *data, int size)
318 h = jhash(data, size, 0);
319 return h & (CACHE_HTABLE_SIZE - 1);
322 /* Return target hint of a DFS cache entry */
323 static inline char *get_tgt_name(const struct cache_entry *ce)
325 struct cache_dfs_tgt *t = ce->tgthint;
/* -ENOENT when the entry has no target hint set. */
327 return t ? t->name : ERR_PTR(-ENOENT);
330 /* Return expire time out of a new entry's TTL */
331 static inline struct timespec64 get_expire_time(int ttl)
333 struct timespec64 ts = {
337 struct timespec64 now;
/* Expiry = coarse "now" + @ttl (initializer of ts elided in this view). */
339 ktime_get_coarse_real_ts64(&now);
340 return timespec64_add(now, ts);
343 /* Allocate a new DFS target */
344 static struct cache_dfs_tgt *alloc_target(const char *name, int path_consumed)
346 struct cache_dfs_tgt *t;
/* GFP_ATOMIC: may be called while htable_rw_lock is held for writing. */
348 t = kmalloc(sizeof(*t), GFP_ATOMIC);
350 return ERR_PTR(-ENOMEM);
351 t->name = kstrdup(name, GFP_ATOMIC);
/* NOTE(review): the kfree(t) before this return is elided in this view. */
354 return ERR_PTR(-ENOMEM);
356 t->path_consumed = path_consumed;
357 INIT_LIST_HEAD(&t->list);
361 * Copy DFS referral information to a cache entry and conditionally update
365 static int copy_ref_data(const struct dfs_info3_param *refs, int numrefs,
366 struct cache_entry *ce, const char *tgthint)
/* Header fields come from the first referral in the array. */
370 ce->ttl = refs[0].ttl;
371 ce->etime = get_expire_time(ce->ttl);
372 ce->srvtype = refs[0].server_type;
373 ce->hdr_flags = refs[0].flags;
374 ce->ref_flags = refs[0].ref_flag;
375 ce->path_consumed = refs[0].path_consumed;
/* Build the target list, one cache_dfs_tgt per referral. */
377 for (i = 0; i < numrefs; i++) {
378 struct cache_dfs_tgt *t;
380 t = alloc_target(refs[i].node_name, refs[i].path_consumed);
/* A target matching @tgthint goes to the front so it becomes the hint. */
385 if (tgthint && !strcasecmp(t->name, tgthint)) {
386 list_add(&t->list, &ce->tlist);
389 list_add_tail(&t->list, &ce->tlist);
/* First target on the list is the (possibly preserved) target hint. */
394 ce->tgthint = list_first_entry_or_null(&ce->tlist,
395 struct cache_dfs_tgt, list);
400 /* Allocate a new cache entry */
401 static struct cache_entry *alloc_cache_entry(const char *path,
402 const struct dfs_info3_param *refs,
405 struct cache_entry *ce;
408 ce = kmem_cache_zalloc(cache_slab, GFP_KERNEL);
410 return ERR_PTR(-ENOMEM);
412 ce->path = kstrdup(path, GFP_KERNEL);
/* kstrdup failed: release the zeroed entry. */
414 kmem_cache_free(cache_slab, ce);
415 return ERR_PTR(-ENOMEM);
417 INIT_HLIST_NODE(&ce->hlist);
418 INIT_LIST_HEAD(&ce->tlist);
/* Copy referral data; on failure the entry is freed (rc path elided). */
420 rc = copy_ref_data(refs, numrefs, ce, NULL);
423 kmem_cache_free(cache_slab, ce);
429 /* Must be called with htable_rw_lock held */
430 static void remove_oldest_entry(void)
433 struct cache_entry *ce;
434 struct cache_entry *to_del = NULL;
/* Scan all buckets for the entry with the earliest expiry time. */
436 for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
437 struct hlist_head *l = &cache_htable[i];
439 hlist_for_each_entry(ce, l, hlist) {
440 if (hlist_unhashed(&ce->hlist))
442 if (!to_del || timespec64_compare(&ce->etime,
449 cifs_dbg(FYI, "%s: no entry to remove\n", __func__);
453 cifs_dbg(FYI, "%s: removing entry\n", __func__);
455 flush_cache_ent(to_del);
458 /* Add a new DFS cache entry */
459 static int add_cache_entry(const char *path, unsigned int hash,
460 struct dfs_info3_param *refs, int numrefs)
462 struct cache_entry *ce;
464 ce = alloc_cache_entry(path, refs, numrefs);
/*
 * Track the smallest TTL seen so far and (re)arm the refresh worker to
 * fire before the soonest entry expires.
 */
468 spin_lock(&cache_ttl_lock);
471 queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
473 cache_ttl = min_t(int, cache_ttl, ce->ttl);
474 mod_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
476 spin_unlock(&cache_ttl_lock);
/* Publish the entry in its hash bucket. */
478 down_write(&htable_rw_lock);
479 hlist_add_head(&ce->hlist, &cache_htable[hash]);
481 up_write(&htable_rw_lock);
/*
 * Case-insensitive exact-path lookup in the path's hash bucket.
 * Must be called with htable_rw_lock held; returns ERR_PTR(-ENOENT)
 * when no entry matches.
 */
486 static struct cache_entry *__lookup_cache_entry(const char *path)
488 struct cache_entry *ce;
492 h = cache_entry_hash(path, strlen(path));
494 hlist_for_each_entry(ce, &cache_htable[h], hlist) {
495 if (!strcasecmp(path, ce->path)) {
503 ce = ERR_PTR(-ENOENT);
508 * Find a DFS cache entry in hash table and optionally check prefix path against
510 * Use whole path components in the match.
511 * Must be called with htable_rw_lock held.
513 * Return ERR_PTR(-ENOENT) if the entry is not found.
515 static struct cache_entry *lookup_cache_entry(const char *path, unsigned int *hash)
517 struct cache_entry *ce = ERR_PTR(-ENOENT);
/* Work on a private copy so trailing components can be stripped in place. */
524 npath = kstrdup(path, GFP_KERNEL);
526 return ERR_PTR(-ENOMEM);
/* Count up to the first three separators to locate the share root. */
530 while ((s = strchr(s, sep)) && ++cnt < 3)
534 h = cache_entry_hash(path, strlen(path));
535 ce = __lookup_cache_entry(path);
539 * Handle paths that have more than two path components and are a complete prefix of the DFS
540 * referral request path (@path).
542 * See MS-DFSC 3.2.5.5 "Receiving a Root Referral Request or Link Referral Request".
544 h = cache_entry_hash(npath, strlen(npath));
545 e = npath + strlen(npath) - 1;
549 /* skip separators */
550 while (e > s && *e == sep)
/* Retry with one trailing component stripped per iteration. */
558 ce = __lookup_cache_entry(npath);
560 h = cache_entry_hash(npath, strlen(npath));
565 /* backward until separator */
566 while (e > s && *e != sep)
/* Free a vol_info's owned contents; the caller disposes of @vi itself. */
576 static void __vol_release(struct vol_info *vi)
580 smb3_cleanup_fs_context_contents(&vi->ctx);
/* kref release callback: unlink from vol_list under its lock, then free. */
584 static void vol_release(struct kref *kref)
586 struct vol_info *vi = container_of(kref, struct vol_info, refcnt);
588 spin_lock(&vol_list_lock);
590 spin_unlock(&vol_list_lock);
/* Drop every volume still on vol_list (cache teardown path). */
594 static inline void free_vol_list(void)
596 struct vol_info *vi, *nvi;
598 list_for_each_entry_safe(vi, nvi, &vol_list, list) {
599 list_del_init(&vi->list);
605 * dfs_cache_destroy - destroy DFS referral cache
607 void dfs_cache_destroy(void)
/* Stop the refresh worker before tearing the cache down. */
609 cancel_delayed_work_sync(&refresh_task);
610 unload_nls(cache_nlsc);
613 kmem_cache_destroy(cache_slab);
614 destroy_workqueue(dfscache_wq);
616 cifs_dbg(FYI, "%s: destroyed DFS referral cache\n", __func__);
619 /* Must be called with htable_rw_lock held */
620 static int __update_cache_entry(const char *path,
621 const struct dfs_info3_param *refs,
625 struct cache_entry *ce;
628 ce = lookup_cache_entry(path, NULL);
/* Remember the current hint name so copy_ref_data() can preserve it. */
633 s = ce->tgthint->name;
634 th = kstrdup(s, GFP_ATOMIC);
642 rc = copy_ref_data(refs, numrefs, ce, th);
/* Issue a GET_DFS_REFERRAL request for @path on @ses's server. */
649 static int get_dfs_referral(const unsigned int xid, struct cifs_ses *ses,
650 const struct nls_table *nls_codepage, int remap,
651 const char *path, struct dfs_info3_param **refs,
654 cifs_dbg(FYI, "%s: get an DFS referral for %s\n", __func__, path);
/* The server's ops must implement get_dfs_refer. */
656 if (!ses || !ses->server || !ses->server->ops->get_dfs_refer)
658 if (unlikely(!nls_codepage))
664 return ses->server->ops->get_dfs_refer(xid, ses, path, refs, numrefs,
665 nls_codepage, remap);
668 /* Update an expired cache entry by getting a new DFS referral from server */
669 static int update_cache_entry(const char *path,
670 const struct dfs_info3_param *refs,
/* Take the table lock for writing around the in-place update. */
676 down_write(&htable_rw_lock);
677 rc = __update_cache_entry(path, refs, numrefs);
678 up_write(&htable_rw_lock);
684 * Find, create or update a DFS cache entry.
686 * If the entry wasn't found, it will create a new one. Or if it was found but
687 * expired, then it will update the entry accordingly.
689 * For interlinks, __cifs_dfs_mount() and expand_dfs_referral() are supposed to
690 * handle them properly.
692 static int __dfs_cache_find(const unsigned int xid, struct cifs_ses *ses,
693 const struct nls_table *nls_codepage, int remap, const char *path)
697 struct cache_entry *ce;
698 struct dfs_info3_param *refs = NULL;
702 cifs_dbg(FYI, "%s: search path: %s\n", __func__, path);
/* Fast path: a fresh cache hit needs no network round trip. */
704 down_read(&htable_rw_lock);
706 ce = lookup_cache_entry(path, &hash);
708 if (!cache_entry_expired(ce)) {
710 up_read(&htable_rw_lock);
717 up_read(&htable_rw_lock);
720 * No entry was found.
722 * Request a new DFS referral in order to create a new cache entry, or
723 * updating an existing one.
725 rc = get_dfs_referral(xid, ses, nls_codepage, remap, path,
730 dump_refs(refs, numrefs);
/* Existing (expired) entry: refresh it in place. */
733 rc = update_cache_entry(path, refs, numrefs);
/* New entry: evict the oldest one first if the cache is full. */
737 if (atomic_read(&cache_count) >= CACHE_MAX_ENTRIES) {
738 cifs_dbg(FYI, "%s: reached max cache size (%d)\n",
739 __func__, CACHE_MAX_ENTRIES);
740 down_write(&htable_rw_lock);
741 remove_oldest_entry();
742 up_write(&htable_rw_lock);
745 rc = add_cache_entry(path, hash, refs, numrefs);
747 atomic_inc(&cache_count);
750 free_dfs_info_array(refs, numrefs);
755 * Set up a DFS referral from a given cache entry.
757 * Must be called with htable_rw_lock held.
759 static int setup_referral(const char *path, struct cache_entry *ce,
760 struct dfs_info3_param *ref, const char *target)
764 cifs_dbg(FYI, "%s: set up new ref\n", __func__);
766 memset(ref, 0, sizeof(*ref));
/* GFP_ATOMIC because the caller holds the htable lock. */
768 ref->path_name = kstrdup(path, GFP_ATOMIC);
772 ref->node_name = kstrdup(target, GFP_ATOMIC);
773 if (!ref->node_name) {
778 ref->path_consumed = ce->path_consumed;
780 ref->server_type = ce->srvtype;
781 ref->ref_flag = ce->ref_flags;
782 ref->flags = ce->hdr_flags;
/* Error path: undo the path_name allocation. */
787 kfree(ref->path_name);
788 ref->path_name = NULL;
792 /* Return target list of a DFS cache entry */
793 static int get_targets(struct cache_entry *ce, struct dfs_cache_tgt_list *tl)
796 struct list_head *head = &tl->tl_list;
797 struct cache_dfs_tgt *t;
798 struct dfs_cache_tgt_iterator *it, *nit;
800 memset(tl, 0, sizeof(*tl));
801 INIT_LIST_HEAD(head);
/* Build one iterator per target (GFP_ATOMIC: htable lock held). */
803 list_for_each_entry(t, &ce->tlist, list) {
804 it = kzalloc(sizeof(*it), GFP_ATOMIC);
810 it->it_name = kstrdup(t->name, GFP_ATOMIC);
816 it->it_path_consumed = t->path_consumed;
/* Keep the target hint first so callers try it before the rest. */
818 if (ce->tgthint == t)
819 list_add(&it->it_list, head);
821 list_add_tail(&it->it_list, head);
824 tl->tl_numtgts = ce->numtgts;
/* Error path: free every iterator built so far. */
829 list_for_each_entry_safe(it, nit, head, it_list) {
837 * dfs_cache_find - find a DFS cache entry
839 * If it doesn't find the cache entry, then it will get a DFS referral
840 * for @path and create a new entry.
842 * In case the cache entry exists but expired, it will get a DFS referral
843 * for @path and then update the respective cache entry.
845 * These parameters are passed down to the get_dfs_refer() call if it
846 * needs to be issued:
848 * @ses: smb session to issue the request on
849 * @nls_codepage: charset conversion
850 * @remap: path character remapping type
851 * @path: path to lookup in DFS referral cache.
853 * @ref: when non-NULL, store single DFS referral result in it.
854 * @tgt_list: when non-NULL, store complete DFS target list in it.
856 * Return zero if the target was found, otherwise non-zero.
858 int dfs_cache_find(const unsigned int xid, struct cifs_ses *ses,
859 const struct nls_table *nls_codepage, int remap,
860 const char *path, struct dfs_info3_param *ref,
861 struct dfs_cache_tgt_list *tgt_list)
865 struct cache_entry *ce;
867 rc = get_normalized_path(path, &npath);
/* Create/refresh the entry as needed, then re-look it up for reading. */
871 rc = __dfs_cache_find(xid, ses, nls_codepage, remap, npath);
875 down_read(&htable_rw_lock);
877 ce = lookup_cache_entry(npath, NULL);
879 up_read(&htable_rw_lock);
885 rc = setup_referral(path, ce, ref, get_tgt_name(ce));
889 rc = get_targets(ce, tgt_list);
891 up_read(&htable_rw_lock);
894 free_normalized_path(path, npath);
899 * dfs_cache_noreq_find - find a DFS cache entry without sending any requests to
900 * the currently connected server.
902 * NOTE: This function will neither update a cache entry in case it was
903 * expired, nor create a new cache entry if @path hasn't been found. It heavily
904 * relies on an existing cache entry.
906 * @path: path to lookup in the DFS referral cache.
907 * @ref: when non-NULL, store single DFS referral result in it.
908 * @tgt_list: when non-NULL, store complete DFS target list in it.
910 * Return 0 if successful.
911 * Return -ENOENT if the entry was not found.
912 * Return non-zero for other errors.
914 int dfs_cache_noreq_find(const char *path, struct dfs_info3_param *ref,
915 struct dfs_cache_tgt_list *tgt_list)
919 struct cache_entry *ce;
921 rc = get_normalized_path(path, &npath);
925 cifs_dbg(FYI, "%s: path: %s\n", __func__, npath);
/* Read-only lookup: no referral request is ever issued here. */
927 down_read(&htable_rw_lock);
929 ce = lookup_cache_entry(npath, NULL);
936 rc = setup_referral(path, ce, ref, get_tgt_name(ce));
940 rc = get_targets(ce, tgt_list);
943 up_read(&htable_rw_lock);
944 free_normalized_path(path, npath);
950 * dfs_cache_update_tgthint - update target hint of a DFS cache entry
952 * If it doesn't find the cache entry, then it will get a DFS referral for @path
953 * and create a new entry.
955 * In case the cache entry exists but expired, it will get a DFS referral
956 * for @path and then update the respective cache entry.
960 * @nls_codepage: charset conversion
961 * @remap: type of character remapping for paths
962 * @path: path to lookup in DFS referral cache.
963 * @it: DFS target iterator
965 * Return zero if the target hint was updated successfully, otherwise non-zero.
967 int dfs_cache_update_tgthint(const unsigned int xid, struct cifs_ses *ses,
968 const struct nls_table *nls_codepage, int remap,
970 const struct dfs_cache_tgt_iterator *it)
974 struct cache_entry *ce;
975 struct cache_dfs_tgt *t;
977 rc = get_normalized_path(path, &npath);
981 cifs_dbg(FYI, "%s: update target hint - path: %s\n", __func__, npath);
/* Ensure the entry exists (creates/refreshes via the server if needed). */
983 rc = __dfs_cache_find(xid, ses, nls_codepage, remap, npath);
987 down_write(&htable_rw_lock);
989 ce = lookup_cache_entry(npath, NULL);
/* Fast path: hint already points at the requested target. */
997 if (likely(!strcasecmp(it->it_name, t->name)))
/* Otherwise search the target list and move the hint. */
1000 list_for_each_entry(t, &ce->tlist, list) {
1001 if (!strcasecmp(t->name, it->it_name)) {
1003 cifs_dbg(FYI, "%s: new target hint: %s\n", __func__,
1010 up_write(&htable_rw_lock);
1012 free_normalized_path(path, npath);
1018 * dfs_cache_noreq_update_tgthint - update target hint of a DFS cache entry
1019 * without sending any requests to the currently connected server.
1021 * NOTE: This function will neither update a cache entry in case it was
1022 * expired, nor create a new cache entry if @path hasn't been found. It heavily
1023 * relies on an existing cache entry.
1025 * @path: path to lookup in DFS referral cache.
1026 * @it: target iterator which contains the target hint to update the cache
1029 * Return zero if the target hint was updated successfully, otherwise non-zero.
1031 int dfs_cache_noreq_update_tgthint(const char *path,
1032 const struct dfs_cache_tgt_iterator *it)
1036 struct cache_entry *ce;
1037 struct cache_dfs_tgt *t;
1042 rc = get_normalized_path(path, &npath);
1046 cifs_dbg(FYI, "%s: path: %s\n", __func__, npath);
1048 down_write(&htable_rw_lock);
1050 ce = lookup_cache_entry(npath, NULL);
/* Fast path: hint already matches the iterator's target. */
1059 if (unlikely(!strcasecmp(it->it_name, t->name)))
/* Otherwise find the matching target and promote it to hint. */
1062 list_for_each_entry(t, &ce->tlist, list) {
1063 if (!strcasecmp(t->name, it->it_name)) {
1065 cifs_dbg(FYI, "%s: new target hint: %s\n", __func__,
1072 up_write(&htable_rw_lock);
1073 free_normalized_path(path, npath);
1079 * dfs_cache_get_tgt_referral - returns a DFS referral (@ref) from a given
1080 * target iterator (@it).
1082 * @path: path to lookup in DFS referral cache.
1083 * @it: DFS target iterator.
1084 * @ref: DFS referral pointer to set up the gathered information.
1086 * Return zero if the DFS referral was set up correctly, otherwise non-zero.
1088 int dfs_cache_get_tgt_referral(const char *path,
1089 const struct dfs_cache_tgt_iterator *it,
1090 struct dfs_info3_param *ref)
1094 struct cache_entry *ce;
1099 rc = get_normalized_path(path, &npath);
1103 cifs_dbg(FYI, "%s: path: %s\n", __func__, npath);
/* Read-only: build the referral from cached data and @it's target name. */
1105 down_read(&htable_rw_lock);
1107 ce = lookup_cache_entry(npath, NULL);
1113 cifs_dbg(FYI, "%s: target name: %s\n", __func__, it->it_name);
1115 rc = setup_referral(path, ce, ref, it->it_name);
1118 up_read(&htable_rw_lock);
1119 free_normalized_path(path, npath);
1125 * dfs_cache_add_vol - add a cifs context during mount() that will be handled by
1126 * DFS cache refresh worker.
1128 * @mntdata: mount data.
1129 * @ctx: cifs context.
1130 * @fullpath: origin full path.
1132 * Return zero if context was set up correctly, otherwise non-zero.
1134 int dfs_cache_add_vol(char *mntdata, struct smb3_fs_context *ctx, const char *fullpath)
1137 struct vol_info *vi;
1139 if (!ctx || !fullpath || !mntdata)
1142 cifs_dbg(FYI, "%s: fullpath: %s\n", __func__, fullpath);
1144 vi = kzalloc(sizeof(*vi), GFP_KERNEL);
1148 vi->fullpath = kstrdup(fullpath, GFP_KERNEL);
1149 if (!vi->fullpath) {
1154 rc = smb3_fs_context_dup(&vi->ctx, ctx);
1156 goto err_free_fullpath;
/* NOTE(review): @mntdata is retained, not copied -- caller must keep it valid. */
1158 vi->mntdata = mntdata;
1159 spin_lock_init(&vi->ctx_lock);
1160 kref_init(&vi->refcnt);
/* Publish on the global volume list for the refresh worker. */
1162 spin_lock(&vol_list_lock);
1163 list_add_tail(&vi->list, &vol_list);
1164 spin_unlock(&vol_list_lock);
1169 kfree(vi->fullpath);
1175 /* Must be called with vol_list_lock held */
1176 static struct vol_info *find_vol(const char *fullpath)
1178 struct vol_info *vi;
/* Case-insensitive match on the volume's original full path. */
1180 list_for_each_entry(vi, &vol_list, list) {
1181 cifs_dbg(FYI, "%s: vi->fullpath: %s\n", __func__, vi->fullpath);
1182 if (!strcasecmp(vi->fullpath, fullpath))
1185 return ERR_PTR(-ENOENT);
1189 * dfs_cache_update_vol - update vol info in DFS cache after failover
1191 * @fullpath: fullpath to look up in volume list.
1192 * @server: TCP ses pointer.
1194 * Return zero if volume was updated, otherwise non-zero.
1196 int dfs_cache_update_vol(const char *fullpath, struct TCP_Server_Info *server)
1198 struct vol_info *vi;
1200 if (!fullpath || !server)
1203 cifs_dbg(FYI, "%s: fullpath: %s\n", __func__, fullpath);
/* Pin the volume with a kref so it stays valid after the lock drops. */
1205 spin_lock(&vol_list_lock);
1206 vi = find_vol(fullpath);
1208 spin_unlock(&vol_list_lock);
1211 kref_get(&vi->refcnt);
1212 spin_unlock(&vol_list_lock);
/* Copy the failed-over server address into the volume's context. */
1214 cifs_dbg(FYI, "%s: updating volume info\n", __func__);
1215 spin_lock(&vi->ctx_lock);
1216 memcpy(&vi->ctx.dstaddr, &server->dstaddr,
1217 sizeof(vi->ctx.dstaddr));
1218 spin_unlock(&vi->ctx_lock);
1220 kref_put(&vi->refcnt, vol_release);
1226 * dfs_cache_del_vol - remove volume info in DFS cache during umount()
1228 * @fullpath: fullpath to look up in volume list.
1230 void dfs_cache_del_vol(const char *fullpath)
1232 struct vol_info *vi;
1234 if (!fullpath || !*fullpath)
1237 cifs_dbg(FYI, "%s: fullpath: %s\n", __func__, fullpath);
1239 spin_lock(&vol_list_lock);
1240 vi = find_vol(fullpath);
1241 spin_unlock(&vol_list_lock);
/* Drop the list's reference; vol_release frees when the last ref dies. */
1244 kref_put(&vi->refcnt, vol_release);
1248 * dfs_cache_get_tgt_share - parse a DFS target
1250 * @path: DFS full path
1251 * @it: DFS target iterator.
1252 * @share: tree name.
1253 * @prefix: prefix path.
1255 * Return zero if target was parsed correctly, otherwise non-zero.
1257 int dfs_cache_get_tgt_share(char *path, const struct dfs_cache_tgt_iterator *it,
1258 char **share, char **prefix)
1262 size_t plen1, plen2;
1264 if (!it || !path || !share || !prefix || strlen(path) < it->it_path_consumed)
/* The target node must start with a separator (\server\share...). */
1270 sep = it->it_name[0];
1271 if (sep != '\\' && sep != '/')
1274 s = strchr(it->it_name + 1, sep);
1278 /* point to prefix in target node */
1279 s = strchrnul(s + 1, sep);
1281 /* extract target share */
1282 *share = kstrndup(it->it_name, s - it->it_name, GFP_KERNEL);
1286 /* skip separator */
1289 /* point to prefix in DFS path */
1290 p = path + it->it_path_consumed;
1294 /* merge prefix paths from DFS path and target node */
1295 plen1 = it->it_name + strlen(it->it_name) - s;
1296 plen2 = path + strlen(path) - p;
1297 if (plen1 || plen2) {
/* Room for both prefixes plus a separator and the terminating NUL. */
1298 len = plen1 + plen2 + 2;
1299 *prefix = kmalloc(len, GFP_KERNEL);
1306 scnprintf(*prefix, len, "%.*s%c%.*s", (int)plen1, s, sep, (int)plen2, p);
1308 strscpy(*prefix, p, len);
1313 /* Get all tcons that are within a DFS namespace and can be refreshed */
1314 static void get_tcons(struct TCP_Server_Info *server, struct list_head *head)
1316 struct cifs_ses *ses;
1317 struct cifs_tcon *tcon;
1319 INIT_LIST_HEAD(head);
1321 spin_lock(&cifs_tcp_ses_lock);
1322 list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
1323 list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
/* Skip tcons needing reconnect/reopen (condition partly elided here). */
1324 if (!tcon->need_reconnect && !tcon->need_reopen_files &&
1327 list_add_tail(&tcon->ulist, head);
/* Also refresh the IPC tcon when it maps to a DFS path. */
1330 if (ses->tcon_ipc && !ses->tcon_ipc->need_reconnect &&
1331 ses->tcon_ipc->dfs_path) {
1332 list_add_tail(&ses->tcon_ipc->ulist, head);
1335 spin_unlock(&cifs_tcp_ses_lock);
/*
 * Return true if @path names a DFS link, i.e. it has more than two
 * '\\'-separated components (\server\share\link...).  A bare root path
 * (\server\share) is not a link.
 *
 * Fix: guard against strchr() returning NULL before dereferencing
 * s + 1 -- a path with fewer than two separators would otherwise cause
 * a NULL-pointer dereference.
 */
static bool is_dfs_link(const char *path)
{
	char *s;

	s = strchr(path + 1, '\\');
	if (!s)
		return false;
	return !!strchr(s + 1, '\\');
}
/*
 * Duplicate only the root portion (\server\share) of @path.
 * Returns ERR_PTR(-EINVAL) for malformed paths, -ENOMEM on allocation
 * failure.
 */
1348 static char *get_dfs_root(const char *path)
1352 s = strchr(path + 1, '\\');
1354 return ERR_PTR(-EINVAL);
1356 s = strchr(s + 1, '\\');
1358 return ERR_PTR(-EINVAL);
/* Copy everything up to (not including) the second separator. */
1360 npath = kstrndup(path, s - path, GFP_KERNEL);
1362 return ERR_PTR(-ENOMEM);
/* Drop the TCP session reference taken by get_tcp_server(). */
1367 static inline void put_tcp_server(struct TCP_Server_Info *server)
1369 cifs_put_tcp_session(server, 0);
/* Find a matching TCP session that is in CifsGood state, with a reference. */
1372 static struct TCP_Server_Info *get_tcp_server(struct smb3_fs_context *ctx)
1374 struct TCP_Server_Info *server;
1376 server = cifs_find_tcp_session(ctx);
1377 if (IS_ERR_OR_NULL(server))
/* Only healthy servers are usable; otherwise drop the reference. */
1380 spin_lock(&GlobalMid_Lock);
1381 if (server->tcpStatus != CifsGood) {
1382 spin_unlock(&GlobalMid_Lock);
1383 put_tcp_server(server);
1386 spin_unlock(&GlobalMid_Lock);
1391 /* Find root SMB session out of a DFS link path */
1392 static struct cifs_ses *find_root_ses(struct vol_info *vi,
1393 struct cifs_tcon *tcon,
1398 struct cache_entry *ce;
1399 struct dfs_info3_param ref = {0};
1400 char *mdata = NULL, *devname = NULL;
1401 struct TCP_Server_Info *server;
1402 struct cifs_ses *ses;
1403 struct smb3_fs_context ctx = {NULL};
/* Reduce the link path to its root (\server\share). */
1405 rpath = get_dfs_root(path);
1407 return ERR_CAST(rpath);
/* Build a referral for the root out of the cached entry. */
1409 down_read(&htable_rw_lock);
1411 ce = lookup_cache_entry(rpath, NULL);
1413 up_read(&htable_rw_lock);
1418 rc = setup_referral(path, ce, &ref, get_tgt_name(ce));
1420 up_read(&htable_rw_lock);
1425 up_read(&htable_rw_lock);
/* Compose mount options for the root and establish a session on it. */
1427 mdata = cifs_compose_mount_options(vi->mntdata, rpath, &ref,
1429 free_dfs_info_param(&ref);
1431 if (IS_ERR(mdata)) {
1432 ses = ERR_CAST(mdata);
1437 rc = cifs_setup_volume_info(&ctx, NULL, devname);
1444 server = get_tcp_server(&ctx);
1446 ses = ERR_PTR(-EHOSTDOWN);
1450 ses = cifs_get_smb_ses(server, &ctx);
/* Cleanup of the temporary fs context (other labels elided in this view). */
1453 smb3_cleanup_fs_context_contents(&ctx);
1461 /* Refresh DFS cache entry from a given tcon */
1462 static int refresh_tcon(struct vol_info *vi, struct cifs_tcon *tcon)
1466 const char *path, *npath;
1467 struct cache_entry *ce;
1468 struct cifs_ses *root_ses = NULL, *ses;
1469 struct dfs_info3_param *refs = NULL;
/* Skip the leading separator stored in tcon->dfs_path. */
1474 path = tcon->dfs_path + 1;
1476 rc = get_normalized_path(path, &npath);
/* Nothing to do while the cached entry is still fresh. */
1480 down_read(&htable_rw_lock);
1482 ce = lookup_cache_entry(npath, NULL);
1485 up_read(&htable_rw_lock);
1489 if (!cache_entry_expired(ce)) {
1490 up_read(&htable_rw_lock);
1494 up_read(&htable_rw_lock);
1496 /* If it's a DFS Link, then use root SMB session for refreshing it */
1497 if (is_dfs_link(npath)) {
1498 ses = root_ses = find_root_ses(vi, tcon, npath);
/* Fetch a fresh referral and update the cache entry in place. */
1508 rc = get_dfs_referral(xid, ses, cache_nlsc, tcon->remap, npath, &refs,
1511 dump_refs(refs, numrefs);
1512 rc = update_cache_entry(npath, refs, numrefs);
1513 free_dfs_info_array(refs, numrefs);
/* Drop the root session reference taken above, if any. */
1517 cifs_put_smb_ses(root_ses);
1520 free_normalized_path(path, npath);
1528 * Worker that will refresh DFS cache based on lowest TTL value from a DFS
1531 static void refresh_cache_worker(struct work_struct *work)
1533 struct vol_info *vi, *nvi;
1534 struct TCP_Server_Info *server;
1537 struct cifs_tcon *tcon, *ntcon;
1541 * Find SMB volumes that are eligible (server->tcpStatus == CifsGood)
1544 spin_lock(&vol_list_lock);
1545 list_for_each_entry(vi, &vol_list, list) {
1546 server = get_tcp_server(&vi->ctx);
/* Pin eligible volumes on a private list before dropping the lock. */
1550 kref_get(&vi->refcnt);
1551 list_add_tail(&vi->rlist, &vols);
1552 put_tcp_server(server);
1554 spin_unlock(&vol_list_lock);
1556 /* Walk through all TCONs and refresh any expired cache entry */
1557 list_for_each_entry_safe(vi, nvi, &vols, rlist) {
1558 spin_lock(&vi->ctx_lock);
1559 server = get_tcp_server(&vi->ctx);
1560 spin_unlock(&vi->ctx_lock);
1565 get_tcons(server, &tcons);
1568 list_for_each_entry_safe(tcon, ntcon, &tcons, ulist) {
1570 * Skip tcp server if any of its tcons failed to refresh
1571 * (possibily due to reconnects).
1574 rc = refresh_tcon(vi, tcon);
1576 list_del_init(&tcon->ulist);
1577 cifs_put_tcon(tcon);
1580 put_tcp_server(server);
/* Release the reference taken when the volume was pinned above. */
1583 list_del_init(&vi->rlist);
1584 kref_put(&vi->refcnt, vol_release);
/* Re-arm ourselves using the current lowest TTL. */
1587 spin_lock(&cache_ttl_lock);
1588 queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
1589 spin_unlock(&cache_ttl_lock);