4 * Complete reimplementation
5 * (C) 1997 Thomas Schoebel-Theuer,
6 * with heavy changes by Linus Torvalds
10 * Notes on the allocation strategy:
12 * The dcache is a master of the icache - whenever a dcache entry
13 * exists, the inode will always exist. "iput()" is done either when
14 * the dcache entry is deleted or garbage collected.
17 #include <linux/syscalls.h>
18 #include <linux/string.h>
21 #include <linux/fsnotify.h>
22 #include <linux/slab.h>
23 #include <linux/init.h>
24 #include <linux/hash.h>
25 #include <linux/cache.h>
26 #include <linux/export.h>
27 #include <linux/mount.h>
28 #include <linux/file.h>
29 #include <asm/uaccess.h>
30 #include <linux/security.h>
31 #include <linux/seqlock.h>
32 #include <linux/swap.h>
33 #include <linux/bootmem.h>
34 #include <linux/fs_struct.h>
35 #include <linux/hardirq.h>
36 #include <linux/bit_spinlock.h>
37 #include <linux/rculist_bl.h>
38 #include <linux/prefetch.h>
39 #include <linux/ratelimit.h>
40 #include <linux/list_lru.h>
46 * dcache->d_inode->i_lock protects:
47 * - i_dentry, d_alias, d_inode of aliases
48 * dcache_hash_bucket lock protects:
49 * - the dcache hash table
50 * s_anon bl list spinlock protects:
51 * - the s_anon list (see __d_drop)
52 * dentry->d_sb->s_dentry_lru_lock protects:
53 * - the dcache lru lists and counters
60 * - d_parent and d_subdirs
61 * - children's d_child and d_parent
65 * dentry->d_inode->i_lock
67 * dentry->d_sb->s_dentry_lru_lock
68 * dcache_hash_bucket lock
71 * If there is an ancestor relationship:
72 * dentry->d_parent->...->d_parent->d_lock
74 * dentry->d_parent->d_lock
77 * If no ancestor relationship:
78 * if (dentry1 < dentry2)
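/*
 * Illustrative sketch of the "no ancestor relationship" rule above: lock the
 * lower-addressed dentry first and nest the second d_lock, the same way the
 * rest of this file uses DENTRY_D_LOCK_NESTED for a second-level d_lock.
 * (example_lock_unrelated_pair is a made-up name, not a helper in this file.)
 */
static inline void example_lock_unrelated_pair(struct dentry *dentry1,
					       struct dentry *dentry2)
{
	if (dentry1 < dentry2) {
		spin_lock(&dentry1->d_lock);
		spin_lock_nested(&dentry2->d_lock, DENTRY_D_LOCK_NESTED);
	} else {
		spin_lock(&dentry2->d_lock);
		spin_lock_nested(&dentry1->d_lock, DENTRY_D_LOCK_NESTED);
	}
}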
82 int sysctl_vfs_cache_pressure __read_mostly = 100;
83 EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);
85 __cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);
87 EXPORT_SYMBOL(rename_lock);
89 static struct kmem_cache *dentry_cache __read_mostly;
92 * This is the single most critical data structure when it comes
93 * to the dcache: the hashtable for lookups. Somebody should try
94 * to make this good - I've just made it work.
96 * This hash-function tries to avoid losing too many bits of hash
97 * information, yet avoid using a prime hash-size or similar.
100 static unsigned int d_hash_mask __read_mostly;
101 static unsigned int d_hash_shift __read_mostly;
103 static struct hlist_bl_head *dentry_hashtable __read_mostly;
105 static inline struct hlist_bl_head *d_hash(const struct dentry *parent,
108 hash += (unsigned long) parent / L1_CACHE_BYTES;
109 return dentry_hashtable + hash_32(hash, d_hash_shift);
112 /* Statistics gathering. */
113 struct dentry_stat_t dentry_stat = {
117 static DEFINE_PER_CPU(long, nr_dentry);
118 static DEFINE_PER_CPU(long, nr_dentry_unused);
120 #if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
123 * Here we resort to our own counters instead of using generic per-cpu counters
124 * for consistency with what the vfs inode code does. We expect to get
125 * better code and performance by having our own specialized counters.
127 * Please note that the loop is done over all possible CPUs, not over all online
128 * CPUs. The reason for this is that we don't want to play games with CPUs going
129 * on and off. If one of them goes off, we will just keep their counters.
131 * glommer: See cffbc8a for details, and if you ever intend to change this,
132 * please update all vfs counters to match.
134 static long get_nr_dentry(void)
138 for_each_possible_cpu(i)
139 sum += per_cpu(nr_dentry, i);
140 return sum < 0 ? 0 : sum;
143 static long get_nr_dentry_unused(void)
147 for_each_possible_cpu(i)
148 sum += per_cpu(nr_dentry_unused, i);
149 return sum < 0 ? 0 : sum;
152 int proc_nr_dentry(ctl_table *table, int write, void __user *buffer,
153 size_t *lenp, loff_t *ppos)
155 dentry_stat.nr_dentry = get_nr_dentry();
156 dentry_stat.nr_unused = get_nr_dentry_unused();
157 return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
162 * Compare 2 name strings, return 0 if they match, otherwise non-zero.
163 * The strings are both count bytes long, and count is non-zero.
165 #ifdef CONFIG_DCACHE_WORD_ACCESS
167 #include <asm/word-at-a-time.h>
169 * NOTE! 'cs' comes from a dentry, so it has an
170 * aligned allocation for this particular component. We don't
171 * strictly need the load_unaligned_zeropad() safety, but it
172 * doesn't hurt either.
174 * In contrast, 'ct' and 'tcount' can be from a pathname, and do
175 * need the careful unaligned handling.
177 static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
179 unsigned long a, b, mask;
182 a = *(unsigned long *)cs;
183 b = load_unaligned_zeropad(ct);
184 if (tcount < sizeof(unsigned long))
186 if (unlikely(a != b))
188 cs += sizeof(unsigned long);
189 ct += sizeof(unsigned long);
190 tcount -= sizeof(unsigned long);
194 mask = bytemask_from_count(tcount);
195 return unlikely(!!((a ^ b) & mask));
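/*
 * Worked example for the final masked compare (assuming the generic
 * little-endian bytemask_from_count()): with tcount == 2 bytes left,
 * mask == 0xffff, so only the first two bytes of the last word feed into
 * (a ^ b) & mask and anything read past the end of the strings is ignored.
 */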
200 static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
214 static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *ct, unsigned tcount)
216 const unsigned char *cs;
218 * Be careful about RCU walk racing with rename:
219 * use ACCESS_ONCE to fetch the name pointer.
221 * NOTE! Even if a rename will mean that the length
222 * was not loaded atomically, we don't care. The
223 * RCU walk will check the sequence count eventually,
224 * and catch it. And we won't overrun the buffer,
225 * because we're reading the name pointer atomically,
226 * and a dentry name is guaranteed to be properly
227 * terminated with a NUL byte.
229 * End result: even if 'len' is wrong, we'll exit
230 * early because the data cannot match (there can
231 * be no NUL in the ct/tcount data)
233 cs = ACCESS_ONCE(dentry->d_name.name);
234 smp_read_barrier_depends();
235 return dentry_string_cmp(cs, ct, tcount);
238 static void __d_free(struct rcu_head *head)
240 struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
242 WARN_ON(!hlist_unhashed(&dentry->d_alias));
243 if (dname_external(dentry))
244 kfree(dentry->d_name.name);
245 kmem_cache_free(dentry_cache, dentry);
251 static void d_free(struct dentry *dentry)
253 BUG_ON((int)dentry->d_lockref.count > 0);
254 this_cpu_dec(nr_dentry);
255 if (dentry->d_op && dentry->d_op->d_release)
256 dentry->d_op->d_release(dentry);
258 /* if dentry was never visible to RCU, immediate free is OK */
259 if (!(dentry->d_flags & DCACHE_RCUACCESS))
260 __d_free(&dentry->d_u.d_rcu);
262 call_rcu(&dentry->d_u.d_rcu, __d_free);
266 * dentry_rcuwalk_barrier - invalidate in-progress rcu-walk lookups
267 * @dentry: the target dentry
268 * After this call, in-progress rcu-walk path lookup will fail. This
269 * should be called after unhashing, and after changing d_inode (if
270 * the dentry has not already been unhashed).
272 static inline void dentry_rcuwalk_barrier(struct dentry *dentry)
274 assert_spin_locked(&dentry->d_lock);
275 /* Go through a barrier */
276 write_seqcount_barrier(&dentry->d_seq);
280 * Release the dentry's inode, using the filesystem
281 * d_iput() operation if defined. Dentry has no refcount
284 static void dentry_iput(struct dentry * dentry)
285 __releases(dentry->d_lock)
286 __releases(dentry->d_inode->i_lock)
288 struct inode *inode = dentry->d_inode;
290 dentry->d_inode = NULL;
291 hlist_del_init(&dentry->d_alias);
292 spin_unlock(&dentry->d_lock);
293 spin_unlock(&inode->i_lock);
295 fsnotify_inoderemove(inode);
296 if (dentry->d_op && dentry->d_op->d_iput)
297 dentry->d_op->d_iput(dentry, inode);
301 spin_unlock(&dentry->d_lock);
306 * Release the dentry's inode, using the filesystem
307 * d_iput() operation if defined. dentry remains in-use.
309 static void dentry_unlink_inode(struct dentry * dentry)
310 __releases(dentry->d_lock)
311 __releases(dentry->d_inode->i_lock)
313 struct inode *inode = dentry->d_inode;
314 __d_clear_type(dentry);
315 dentry->d_inode = NULL;
316 hlist_del_init(&dentry->d_alias);
317 dentry_rcuwalk_barrier(dentry);
318 spin_unlock(&dentry->d_lock);
319 spin_unlock(&inode->i_lock);
321 fsnotify_inoderemove(inode);
322 if (dentry->d_op && dentry->d_op->d_iput)
323 dentry->d_op->d_iput(dentry, inode);
329 * The DCACHE_LRU_LIST bit is set whenever the 'd_lru' entry
330 * is in use - which includes both the "real" per-superblock
331 * LRU list _and_ the DCACHE_SHRINK_LIST use.
333 * The DCACHE_SHRINK_LIST bit is set whenever the dentry is
334 * on the shrink list (ie not on the superblock LRU list).
336 * The per-cpu "nr_dentry_unused" counters are updated with
337 * the DCACHE_LRU_LIST bit.
339 * These helper functions make sure we always follow the
340 * rules. d_lock must be held by the caller.
342 #define D_FLAG_VERIFY(dentry,x) WARN_ON_ONCE(((dentry)->d_flags & (DCACHE_LRU_LIST | DCACHE_SHRINK_LIST)) != (x))
343 static void d_lru_add(struct dentry *dentry)
345 D_FLAG_VERIFY(dentry, 0);
346 dentry->d_flags |= DCACHE_LRU_LIST;
347 this_cpu_inc(nr_dentry_unused);
348 WARN_ON_ONCE(!list_lru_add(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
351 static void d_lru_del(struct dentry *dentry)
353 D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
354 dentry->d_flags &= ~DCACHE_LRU_LIST;
355 this_cpu_dec(nr_dentry_unused);
356 WARN_ON_ONCE(!list_lru_del(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
359 static void d_shrink_del(struct dentry *dentry)
361 D_FLAG_VERIFY(dentry, DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
362 list_del_init(&dentry->d_lru);
363 dentry->d_flags &= ~(DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
364 this_cpu_dec(nr_dentry_unused);
367 static void d_shrink_add(struct dentry *dentry, struct list_head *list)
369 D_FLAG_VERIFY(dentry, 0);
370 list_add(&dentry->d_lru, list);
371 dentry->d_flags |= DCACHE_SHRINK_LIST | DCACHE_LRU_LIST;
372 this_cpu_inc(nr_dentry_unused);
376 * These can only be called under the global LRU lock, ie during the
377 * callback for freeing the LRU list. "isolate" removes it from the
378 * LRU lists entirely, while d_lru_shrink_move moves it to the indicated
381 static void d_lru_isolate(struct dentry *dentry)
383 D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
384 dentry->d_flags &= ~DCACHE_LRU_LIST;
385 this_cpu_dec(nr_dentry_unused);
386 list_del_init(&dentry->d_lru);
389 static void d_lru_shrink_move(struct dentry *dentry, struct list_head *list)
391 D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
392 dentry->d_flags |= DCACHE_SHRINK_LIST;
393 list_move_tail(&dentry->d_lru, list);
397 * dentry_lru_(add|del) must be called with d_lock held.
399 static void dentry_lru_add(struct dentry *dentry)
401 if (unlikely(!(dentry->d_flags & DCACHE_LRU_LIST)))
406 * Remove a dentry with references from the LRU.
408 * If we are on the shrink list, then we can get to try_prune_one_dentry() and
409 * lose our last reference through the parent walk. In this case, we need to
410 * remove ourselves from the shrink list, not the LRU.
412 static void dentry_lru_del(struct dentry *dentry)
414 if (dentry->d_flags & DCACHE_LRU_LIST) {
415 if (dentry->d_flags & DCACHE_SHRINK_LIST)
416 return d_shrink_del(dentry);
422 * d_kill - kill dentry and return parent
423 * @dentry: dentry to kill
424 * @parent: parent dentry
426 * The dentry must already be unhashed and removed from the LRU.
428 * If this is the root of the dentry tree, return NULL.
430 * dentry->d_lock and parent->d_lock must be held by caller, and are dropped by
433 static struct dentry *d_kill(struct dentry *dentry, struct dentry *parent)
434 __releases(dentry->d_lock)
435 __releases(parent->d_lock)
436 __releases(dentry->d_inode->i_lock)
438 list_del(&dentry->d_u.d_child);
440 * Inform d_walk() that we are no longer attached to the
443 dentry->d_flags |= DCACHE_DENTRY_KILLED;
445 spin_unlock(&parent->d_lock);
448 * dentry_iput drops the locks, at which point nobody (except
449 * transient RCU lookups) can reach this dentry.
456 * d_drop - drop a dentry
457 * @dentry: dentry to drop
459 * d_drop() unhashes the entry from the parent dentry hashes, so that it won't
460 * be found through a VFS lookup any more. Note that this is different from
461 * deleting the dentry - d_delete will try to mark the dentry negative if
462 * possible, giving a successful _negative_ lookup, while d_drop will
463 * just make the cache lookup fail.
465 * d_drop() is used mainly for stuff that wants to invalidate a dentry for some
466 * reason (NFS timeouts or autofs deletes).
468 * __d_drop requires dentry->d_lock.
470 void __d_drop(struct dentry *dentry)
472 if (!d_unhashed(dentry)) {
473 struct hlist_bl_head *b;
475 * Hashed dentries are normally on the dentry hashtable,
476 * with the exception of those newly allocated by
477 * d_obtain_alias, which are always IS_ROOT:
479 if (unlikely(IS_ROOT(dentry)))
480 b = &dentry->d_sb->s_anon;
482 b = d_hash(dentry->d_parent, dentry->d_name.hash);
485 __hlist_bl_del(&dentry->d_hash);
486 dentry->d_hash.pprev = NULL;
488 dentry_rcuwalk_barrier(dentry);
491 EXPORT_SYMBOL(__d_drop);
493 void d_drop(struct dentry *dentry)
495 spin_lock(&dentry->d_lock);
497 spin_unlock(&dentry->d_lock);
499 EXPORT_SYMBOL(d_drop);
502 * Finish off a dentry we've decided to kill.
503 * dentry->d_lock must be held, returns with it unlocked.
504 * If unlock_on_failure is non-zero, d_lock is dropped when a trylock fails.
505 * Returns dentry requiring refcount drop, or NULL if we're done.
507 static struct dentry *
508 dentry_kill(struct dentry *dentry, int unlock_on_failure)
509 __releases(dentry->d_lock)
512 struct dentry *parent;
514 inode = dentry->d_inode;
515 if (inode && !spin_trylock(&inode->i_lock)) {
517 if (unlock_on_failure) {
518 spin_unlock(&dentry->d_lock);
521 return dentry; /* try again with same dentry */
526 parent = dentry->d_parent;
527 if (parent && !spin_trylock(&parent->d_lock)) {
529 spin_unlock(&inode->i_lock);
534 * The dentry is now unrecoverably dead to the world.
536 lockref_mark_dead(&dentry->d_lockref);
539 * inform the fs via d_prune that this dentry is about to be
540 * unhashed and destroyed.
542 if ((dentry->d_flags & DCACHE_OP_PRUNE) && !d_unhashed(dentry))
543 dentry->d_op->d_prune(dentry);
545 dentry_lru_del(dentry);
546 /* if it was on the hash then remove it */
548 return d_kill(dentry, parent);
554 * This is complicated by the fact that we do not want to put
555 * dentries that are no longer on any hash chain on the unused
556 * list: we'd much rather just get rid of them immediately.
558 * However, that implies that we have to traverse the dentry
559 * tree upwards to the parents which might _also_ now be
560 * scheduled for deletion (it may have been only waiting for
561 * its last child to go away).
563 * This tail recursion is done by hand as we don't want to depend
564 * on the compiler to always get this right (gcc generally doesn't).
565 * Real recursion would eat up our stack space.
569 * dput - release a dentry
570 * @dentry: dentry to release
572 * Release a dentry. This will drop the usage count and if appropriate
573 * call the dentry unlink method, remove it from the queues and
574 * release its resources. If the parent dentries were scheduled for release
575 * they too may now get deleted.
577 void dput(struct dentry *dentry)
579 if (unlikely(!dentry))
583 if (lockref_put_or_lock(&dentry->d_lockref))
586 /* Unreachable? Get rid of it */
587 if (unlikely(d_unhashed(dentry)))
590 if (unlikely(dentry->d_flags & DCACHE_OP_DELETE)) {
591 if (dentry->d_op->d_delete(dentry))
595 if (!(dentry->d_flags & DCACHE_REFERENCED))
596 dentry->d_flags |= DCACHE_REFERENCED;
597 dentry_lru_add(dentry);
599 dentry->d_lockref.count--;
600 spin_unlock(&dentry->d_lock);
604 dentry = dentry_kill(dentry, 1);
611 * d_invalidate - invalidate a dentry
612 * @dentry: dentry to invalidate
614 * Try to invalidate the dentry if it turns out to be
615 * possible. If there are other dentries that can be
616 * reached through this one we can't delete it and we
617 * return -EBUSY. On success we return 0.
622 int d_invalidate(struct dentry * dentry)
625 * If it's already been dropped, return OK.
627 spin_lock(&dentry->d_lock);
628 if (d_unhashed(dentry)) {
629 spin_unlock(&dentry->d_lock);
633 * Check whether to do a partial shrink_dcache
634 * to get rid of unused child entries.
636 if (!list_empty(&dentry->d_subdirs)) {
637 spin_unlock(&dentry->d_lock);
638 shrink_dcache_parent(dentry);
639 spin_lock(&dentry->d_lock);
643 * Somebody else still using it?
645 * If it's a directory, we can't drop it
646 * for fear of somebody re-populating it
647 * with children (even though dropping it
648 * would make it unreachable from the root,
649 * we might still populate it if it was a
650 * working directory or similar).
651 * We also need to leave mountpoints alone,
654 if (dentry->d_lockref.count > 1 && dentry->d_inode) {
655 if (S_ISDIR(dentry->d_inode->i_mode) || d_mountpoint(dentry)) {
656 spin_unlock(&dentry->d_lock);
662 spin_unlock(&dentry->d_lock);
665 EXPORT_SYMBOL(d_invalidate);
667 /* This must be called with d_lock held */
668 static inline void __dget_dlock(struct dentry *dentry)
670 dentry->d_lockref.count++;
673 static inline void __dget(struct dentry *dentry)
675 lockref_get(&dentry->d_lockref);
678 struct dentry *dget_parent(struct dentry *dentry)
684 * Do optimistic parent lookup without any
688 ret = ACCESS_ONCE(dentry->d_parent);
689 gotref = lockref_get_not_zero(&ret->d_lockref);
691 if (likely(gotref)) {
692 if (likely(ret == ACCESS_ONCE(dentry->d_parent)))
699 * Don't need rcu_dereference because we re-check it was correct under
703 ret = dentry->d_parent;
704 spin_lock(&ret->d_lock);
705 if (unlikely(ret != dentry->d_parent)) {
706 spin_unlock(&ret->d_lock);
711 BUG_ON(!ret->d_lockref.count);
712 ret->d_lockref.count++;
713 spin_unlock(&ret->d_lock);
716 EXPORT_SYMBOL(dget_parent);
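/*
 * Illustrative sketch: a caller that needs the parent pinned pairs
 * dget_parent() with dput(). (example_parent_is_root is a made-up helper,
 * shown only to demonstrate the reference discipline.)
 */
static inline bool example_parent_is_root(struct dentry *dentry)
{
	struct dentry *parent = dget_parent(dentry);
	bool ret = IS_ROOT(parent);

	dput(parent);
	return ret;
}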
719 * d_find_alias - grab a hashed alias of inode
720 * @inode: inode in question
721 * @want_discon: flag, used by d_splice_alias, to request
722 * that only a DISCONNECTED alias be returned.
724 * If inode has a hashed alias, or is a directory and has any alias,
725 * acquire the reference to alias and return it. Otherwise return NULL.
726 * Notice that if inode is a directory there can be only one alias and
727 * it can be unhashed only if it has no children, or if it is the root
730 * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer
731 * any other hashed alias over that one unless @want_discon is set,
732 * in which case only return an IS_ROOT, DCACHE_DISCONNECTED alias.
734 static struct dentry *__d_find_alias(struct inode *inode, int want_discon)
736 struct dentry *alias, *discon_alias;
740 hlist_for_each_entry(alias, &inode->i_dentry, d_alias) {
741 spin_lock(&alias->d_lock);
742 if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
743 if (IS_ROOT(alias) &&
744 (alias->d_flags & DCACHE_DISCONNECTED)) {
745 discon_alias = alias;
746 } else if (!want_discon) {
748 spin_unlock(&alias->d_lock);
752 spin_unlock(&alias->d_lock);
755 alias = discon_alias;
756 spin_lock(&alias->d_lock);
757 if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
758 if (IS_ROOT(alias) &&
759 (alias->d_flags & DCACHE_DISCONNECTED)) {
761 spin_unlock(&alias->d_lock);
765 spin_unlock(&alias->d_lock);
771 struct dentry *d_find_alias(struct inode *inode)
773 struct dentry *de = NULL;
775 if (!hlist_empty(&inode->i_dentry)) {
776 spin_lock(&inode->i_lock);
777 de = __d_find_alias(inode, 0);
778 spin_unlock(&inode->i_lock);
782 EXPORT_SYMBOL(d_find_alias);
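/*
 * Illustrative sketch: d_find_alias() returns a referenced dentry (or NULL),
 * so a caller must dput() whatever it gets back. (example_report_alias is a
 * made-up helper used only for demonstration.)
 */
static inline void example_report_alias(struct inode *inode)
{
	struct dentry *alias = d_find_alias(inode);

	if (alias) {
		pr_info("inode %lu has alias %s\n",
			inode->i_ino, alias->d_name.name);
		dput(alias);
	}
}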
785 * Try to kill dentries associated with this inode.
786 * WARNING: you must own a reference to inode.
788 void d_prune_aliases(struct inode *inode)
790 struct dentry *dentry;
792 spin_lock(&inode->i_lock);
793 hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
794 spin_lock(&dentry->d_lock);
795 if (!dentry->d_lockref.count) {
797 * inform the fs via d_prune that this dentry
798 * is about to be unhashed and destroyed.
800 if ((dentry->d_flags & DCACHE_OP_PRUNE) &&
802 dentry->d_op->d_prune(dentry);
804 __dget_dlock(dentry);
806 spin_unlock(&dentry->d_lock);
807 spin_unlock(&inode->i_lock);
811 spin_unlock(&dentry->d_lock);
813 spin_unlock(&inode->i_lock);
815 EXPORT_SYMBOL(d_prune_aliases);
818 * Try to throw away a dentry - free the inode, dput the parent.
819 * Requires dentry->d_lock is held, and dentry->d_count == 0.
820 * Releases dentry->d_lock.
822 * This may fail if locks cannot be acquired; no problem, just try again.
824 static struct dentry * try_prune_one_dentry(struct dentry *dentry)
825 __releases(dentry->d_lock)
827 struct dentry *parent;
829 parent = dentry_kill(dentry, 0);
831 * If dentry_kill returns NULL, we have nothing more to do.
832 * If it returns the same dentry, trylocks failed. In either
833 * case, just loop again.
835 * Otherwise, we need to prune ancestors too. This is necessary
836 * to prevent quadratic behavior of shrink_dcache_parent(), but
837 * is also expected to be beneficial in reducing dentry cache
842 if (parent == dentry)
845 /* Prune ancestors. */
848 if (lockref_put_or_lock(&dentry->d_lockref))
850 dentry = dentry_kill(dentry, 1);
855 static void shrink_dentry_list(struct list_head *list)
857 struct dentry *dentry;
861 dentry = list_entry_rcu(list->prev, struct dentry, d_lru);
862 if (&dentry->d_lru == list)
866 * Get the dentry lock, and re-verify that the dentry is
867 * still on the shrink list. If it is, we know that
868 * DCACHE_SHRINK_LIST and DCACHE_LRU_LIST are set.
870 spin_lock(&dentry->d_lock);
871 if (dentry != list_entry(list->prev, struct dentry, d_lru)) {
872 spin_unlock(&dentry->d_lock);
877 * The dispose list is isolated and dentries are not accounted
878 * to the LRU here, so we can simply remove it from the list
879 * here regardless of whether it is referenced or not.
881 d_shrink_del(dentry);
884 * We found an inuse dentry which was not removed from
885 * the LRU because of laziness during lookup. Do not free it.
887 if (dentry->d_lockref.count) {
888 spin_unlock(&dentry->d_lock);
894 * If try_prune_one_dentry() returns a dentry, it will
895 * be the same one we passed in, and d_lock will
896 * have been held the whole time, so it will not
897 * have been added to any other lists. We failed
898 * to get the inode lock.
900 * We just add it back to the shrink list.
902 dentry = try_prune_one_dentry(dentry);
906 d_shrink_add(dentry, list);
907 spin_unlock(&dentry->d_lock);
913 static enum lru_status
914 dentry_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg)
916 struct list_head *freeable = arg;
917 struct dentry *dentry = container_of(item, struct dentry, d_lru);
921 * we are inverting the lru lock/dentry->d_lock here,
922 * so use a trylock. If we fail to get the lock, just skip
925 if (!spin_trylock(&dentry->d_lock))
929 * Referenced dentries are still in use. If they have active
930 * counts, just remove them from the LRU. Otherwise give them
931 * another pass through the LRU.
933 if (dentry->d_lockref.count) {
934 d_lru_isolate(dentry);
935 spin_unlock(&dentry->d_lock);
939 if (dentry->d_flags & DCACHE_REFERENCED) {
940 dentry->d_flags &= ~DCACHE_REFERENCED;
941 spin_unlock(&dentry->d_lock);
944 * The list move itself will be made by the common LRU code. At
945 * this point, we've dropped the dentry->d_lock but keep the
946 * lru lock. This is safe to do, since every list movement is
947 * protected by the lru lock even if both locks are held.
949 * This is guaranteed by the fact that all LRU management
950 * functions are intermediated by the LRU API calls like
951 * list_lru_add and list_lru_del. List movement in this file
952 * only ever occurs through these functions or through callbacks
953 * like this one, that are called from the LRU API.
955 * The only exceptions to this are functions like
956 * shrink_dentry_list, and code that first checks for the
957 * DCACHE_SHRINK_LIST flag. Those are guaranteed to be
958 * operating only with stack provided lists after they are
959 * properly isolated from the main list. It is thus always a
965 d_lru_shrink_move(dentry, freeable);
966 spin_unlock(&dentry->d_lock);
972 * prune_dcache_sb - shrink the dcache
974 * @nr_to_scan : number of entries to try to free
975 * @nid: which node to scan for freeable entities
977 * Attempt to shrink the superblock dcache LRU by @nr_to_scan entries. This is
978 * done when we need more memory and called from the superblock shrinker
981 * This function may fail to free any resources if all the dentries are in
984 long prune_dcache_sb(struct super_block *sb, unsigned long nr_to_scan,
990 freed = list_lru_walk_node(&sb->s_dentry_lru, nid, dentry_lru_isolate,
991 &dispose, &nr_to_scan);
992 shrink_dentry_list(&dispose);
996 static enum lru_status dentry_lru_isolate_shrink(struct list_head *item,
997 spinlock_t *lru_lock, void *arg)
999 struct list_head *freeable = arg;
1000 struct dentry *dentry = container_of(item, struct dentry, d_lru);
1003 * we are inverting the lru lock/dentry->d_lock here,
1004 * so use a trylock. If we fail to get the lock, just skip
1007 if (!spin_trylock(&dentry->d_lock))
1010 d_lru_shrink_move(dentry, freeable);
1011 spin_unlock(&dentry->d_lock);
1018 * shrink_dcache_sb - shrink dcache for a superblock
1021 * Shrink the dcache for the specified super block. This is used to free
1022 * the dcache before unmounting a file system.
1024 void shrink_dcache_sb(struct super_block *sb)
1031 freed = list_lru_walk(&sb->s_dentry_lru,
1032 dentry_lru_isolate_shrink, &dispose, UINT_MAX);
1034 this_cpu_sub(nr_dentry_unused, freed);
1035 shrink_dentry_list(&dispose);
1036 } while (freed > 0);
1038 EXPORT_SYMBOL(shrink_dcache_sb);
1041 * enum d_walk_ret - action to take during tree walk
1042 * @D_WALK_CONTINUE: continue walk
1043 * @D_WALK_QUIT: quit walk
1044 * @D_WALK_NORETRY: quit when retry is needed
1045 * @D_WALK_SKIP: skip this dentry and its children
1055 * d_walk - walk the dentry tree
1056 * @parent: start of walk
1057 * @data: data passed to @enter() and @finish()
1058 * @enter: callback when first entering the dentry
1059 * @finish: callback when successfully finished the walk
1061 * The @enter() and @finish() callbacks are called with d_lock held.
1063 static void d_walk(struct dentry *parent, void *data,
1064 enum d_walk_ret (*enter)(void *, struct dentry *),
1065 void (*finish)(void *))
1067 struct dentry *this_parent;
1068 struct list_head *next;
1070 enum d_walk_ret ret;
1074 read_seqbegin_or_lock(&rename_lock, &seq);
1075 this_parent = parent;
1076 spin_lock(&this_parent->d_lock);
1078 ret = enter(data, this_parent);
1080 case D_WALK_CONTINUE:
1085 case D_WALK_NORETRY:
1090 next = this_parent->d_subdirs.next;
1092 while (next != &this_parent->d_subdirs) {
1093 struct list_head *tmp = next;
1094 struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
1097 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
1099 ret = enter(data, dentry);
1101 case D_WALK_CONTINUE:
1104 spin_unlock(&dentry->d_lock);
1106 case D_WALK_NORETRY:
1110 spin_unlock(&dentry->d_lock);
1114 if (!list_empty(&dentry->d_subdirs)) {
1115 spin_unlock(&this_parent->d_lock);
1116 spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
1117 this_parent = dentry;
1118 spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
1121 spin_unlock(&dentry->d_lock);
1124 * All done at this level ... ascend and resume the search.
1126 if (this_parent != parent) {
1127 struct dentry *child = this_parent;
1128 this_parent = child->d_parent;
1131 spin_unlock(&child->d_lock);
1132 spin_lock(&this_parent->d_lock);
1135 * might go back up the wrong parent if we have had a rename
1138 if (this_parent != child->d_parent ||
1139 (child->d_flags & DCACHE_DENTRY_KILLED) ||
1140 need_seqretry(&rename_lock, seq)) {
1141 spin_unlock(&this_parent->d_lock);
1146 next = child->d_u.d_child.next;
1149 if (need_seqretry(&rename_lock, seq)) {
1150 spin_unlock(&this_parent->d_lock);
1157 spin_unlock(&this_parent->d_lock);
1158 done_seqretry(&rename_lock, seq);
1169 * Search for at least 1 mount point in the dentry's subdirs.
1170 * We descend to the next level whenever the d_subdirs
1171 * list is non-empty and continue searching.
1174 static enum d_walk_ret check_mount(void *data, struct dentry *dentry)
1177 if (d_mountpoint(dentry)) {
1181 return D_WALK_CONTINUE;
1185 * have_submounts - check for mounts over a dentry
1186 * @parent: dentry to check.
1188 * Return true if the parent or its subdirectories contain
1191 int have_submounts(struct dentry *parent)
1195 d_walk(parent, &ret, check_mount, NULL);
1199 EXPORT_SYMBOL(have_submounts);
1202 * Called by mount code to set a mountpoint and check if the mountpoint is
1203 * reachable (e.g. NFS can unhash a directory dentry and then the complete
1204 * subtree can become unreachable).
1206 * Only one of check_submounts_and_drop() and d_set_mounted() must succeed. For
1207 * this reason take rename_lock and d_lock on dentry and ancestors.
1209 int d_set_mounted(struct dentry *dentry)
1213 write_seqlock(&rename_lock);
1214 for (p = dentry->d_parent; !IS_ROOT(p); p = p->d_parent) {
1215 /* Need exclusion wrt. check_submounts_and_drop() */
1216 spin_lock(&p->d_lock);
1217 if (unlikely(d_unhashed(p))) {
1218 spin_unlock(&p->d_lock);
1221 spin_unlock(&p->d_lock);
1223 spin_lock(&dentry->d_lock);
1224 if (!d_unlinked(dentry)) {
1225 dentry->d_flags |= DCACHE_MOUNTED;
1228 spin_unlock(&dentry->d_lock);
1230 write_sequnlock(&rename_lock);
1235 * Search the dentry child list of the specified parent,
1236 * and move any unused dentries to the end of the unused
1237 * list for prune_dcache(). We descend to the next level
1238 * whenever the d_subdirs list is non-empty and continue
1241 * It returns zero iff there are no unused children,
1242 * otherwise it returns the number of children moved to
1243 * the end of the unused list. This may not be the total
1244 * number of unused children, because select_parent can
1245 * drop the lock and return early due to latency
1249 struct select_data {
1250 struct dentry *start;
1251 struct list_head dispose;
1255 static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
1257 struct select_data *data = _data;
1258 enum d_walk_ret ret = D_WALK_CONTINUE;
1260 if (data->start == dentry)
1264 * move only zero ref count dentries to the dispose list.
1266 * Those which are presently on the shrink list, being processed
1267 * by shrink_dentry_list(), shouldn't be moved. Otherwise the
1268 * loop in shrink_dcache_parent() might not make any progress
1271 if (dentry->d_lockref.count) {
1272 dentry_lru_del(dentry);
1273 } else if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) {
1275 * We can't use d_lru_shrink_move() because we
1276 * need to get the global LRU lock and do the
1280 d_shrink_add(dentry, &data->dispose);
1282 ret = D_WALK_NORETRY;
1285 * We can return to the caller if we have found some (this
1286 * ensures forward progress). We'll be coming back to find
1289 if (data->found && need_resched())
1296 * shrink_dcache_parent - prune dcache
1297 * @parent: parent of entries to prune
1299 * Prune the dcache to remove unused children of the parent dentry.
1301 void shrink_dcache_parent(struct dentry *parent)
1304 struct select_data data;
1306 INIT_LIST_HEAD(&data.dispose);
1307 data.start = parent;
1310 d_walk(parent, &data, select_collect, NULL);
1314 shrink_dentry_list(&data.dispose);
1318 EXPORT_SYMBOL(shrink_dcache_parent);
1320 static enum d_walk_ret umount_collect(void *_data, struct dentry *dentry)
1322 struct select_data *data = _data;
1323 enum d_walk_ret ret = D_WALK_CONTINUE;
1325 if (dentry->d_lockref.count) {
1326 dentry_lru_del(dentry);
1327 if (likely(!list_empty(&dentry->d_subdirs)))
1329 if (dentry == data->start && dentry->d_lockref.count == 1)
1332 "BUG: Dentry %p{i=%lx,n=%s}"
1333 " still in use (%d)"
1334 " [unmount of %s %s]\n",
1337 dentry->d_inode->i_ino : 0UL,
1338 dentry->d_name.name,
1339 dentry->d_lockref.count,
1340 dentry->d_sb->s_type->name,
1341 dentry->d_sb->s_id);
1343 } else if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) {
1345 * We can't use d_lru_shrink_move() because we
1346 * need to get the global LRU lock and do the
1349 if (dentry->d_flags & DCACHE_LRU_LIST)
1351 d_shrink_add(dentry, &data->dispose);
1353 ret = D_WALK_NORETRY;
1356 if (data->found && need_resched())
1362 * destroy the dentries attached to a superblock on unmounting
1364 void shrink_dcache_for_umount(struct super_block *sb)
1366 struct dentry *dentry;
1368 if (down_read_trylock(&sb->s_umount))
1371 dentry = sb->s_root;
1374 struct select_data data;
1376 INIT_LIST_HEAD(&data.dispose);
1377 data.start = dentry;
1380 d_walk(dentry, &data, umount_collect, NULL);
1384 shrink_dentry_list(&data.dispose);
1390 while (!hlist_bl_empty(&sb->s_anon)) {
1391 struct select_data data;
1392 dentry = hlist_bl_entry(hlist_bl_first(&sb->s_anon), struct dentry, d_hash);
1394 INIT_LIST_HEAD(&data.dispose);
1398 d_walk(dentry, &data, umount_collect, NULL);
1400 shrink_dentry_list(&data.dispose);
1405 static enum d_walk_ret check_and_collect(void *_data, struct dentry *dentry)
1407 struct select_data *data = _data;
1409 if (d_mountpoint(dentry)) {
1410 data->found = -EBUSY;
1414 return select_collect(_data, dentry);
1417 static void check_and_drop(void *_data)
1419 struct select_data *data = _data;
1421 if (d_mountpoint(data->start))
1422 data->found = -EBUSY;
1424 __d_drop(data->start);
1428 * check_submounts_and_drop - prune dcache, check for submounts and drop
1430 * All done as a single atomic operation relative to has_unlinked_ancestor().
1431 * Returns 0 if successfully unhashed @dentry. If there were submounts then
1434 * @dentry: dentry to prune and drop
1436 int check_submounts_and_drop(struct dentry *dentry)
1440 /* Negative dentries can be dropped without further checks */
1441 if (!dentry->d_inode) {
1447 struct select_data data;
1449 INIT_LIST_HEAD(&data.dispose);
1450 data.start = dentry;
1453 d_walk(dentry, &data, check_and_collect, check_and_drop);
1456 if (!list_empty(&data.dispose))
1457 shrink_dentry_list(&data.dispose);
1468 EXPORT_SYMBOL(check_submounts_and_drop);
1471 * __d_alloc - allocate a dcache entry
1472 * @sb: filesystem it will belong to
1473 * @name: qstr of the name
1475 * Allocates a dentry. It returns %NULL if there is insufficient memory
1476 * available. On success the dentry is returned. The name passed in is
1477 * copied, so the caller's copy may be reused after this call.
1480 struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
1482 struct dentry *dentry;
1485 dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
1490 * We guarantee that the inline name is always NUL-terminated.
1491 * This way the memcpy() done by the name switching in rename
1492 * will still always have a NUL at the end, even if we might
1493 * be overwriting an internal NUL character
1495 dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
1496 if (name->len > DNAME_INLINE_LEN-1) {
1497 dname = kmalloc(name->len + 1, GFP_KERNEL);
1499 kmem_cache_free(dentry_cache, dentry);
1503 dname = dentry->d_iname;
1506 dentry->d_name.len = name->len;
1507 dentry->d_name.hash = name->hash;
1508 memcpy(dname, name->name, name->len);
1509 dname[name->len] = 0;
1511 /* Make sure we always see the terminating NUL character */
1513 dentry->d_name.name = dname;
1515 dentry->d_lockref.count = 1;
1516 dentry->d_flags = 0;
1517 spin_lock_init(&dentry->d_lock);
1518 seqcount_init(&dentry->d_seq);
1519 dentry->d_inode = NULL;
1520 dentry->d_parent = dentry;
1522 dentry->d_op = NULL;
1523 dentry->d_fsdata = NULL;
1524 INIT_HLIST_BL_NODE(&dentry->d_hash);
1525 INIT_LIST_HEAD(&dentry->d_lru);
1526 INIT_LIST_HEAD(&dentry->d_subdirs);
1527 INIT_HLIST_NODE(&dentry->d_alias);
1528 INIT_LIST_HEAD(&dentry->d_u.d_child);
1529 d_set_d_op(dentry, dentry->d_sb->s_d_op);
1531 this_cpu_inc(nr_dentry);
1537 * d_alloc - allocate a dcache entry
1538 * @parent: parent of entry to allocate
1539 * @name: qstr of the name
1541 * Allocates a dentry. It returns %NULL if there is insufficient memory
1542 * available. On success the dentry is returned. The name passed in is
1543 * copied, so the caller's copy may be reused after this call.
1545 struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
1547 struct dentry *dentry = __d_alloc(parent->d_sb, name);
1551 spin_lock(&parent->d_lock);
1553 * don't need child lock because it is not subject
1554 * to concurrency here
1556 __dget_dlock(parent);
1557 dentry->d_parent = parent;
1558 list_add(&dentry->d_u.d_child, &parent->d_subdirs);
1559 spin_unlock(&parent->d_lock);
1563 EXPORT_SYMBOL(d_alloc);
1566 * d_alloc_pseudo - allocate a dentry (for lookup-less filesystems)
1567 * @sb: the superblock
1568 * @name: qstr of the name
1570 * For a filesystem that just pins its dentries in memory and never
1571 * performs lookups at all, return an unhashed IS_ROOT dentry.
1573 struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name)
1575 return __d_alloc(sb, name);
1577 EXPORT_SYMBOL(d_alloc_pseudo);
1579 struct dentry *d_alloc_name(struct dentry *parent, const char *name)
1584 q.len = strlen(name);
1585 q.hash = full_name_hash(q.name, q.len);
1586 return d_alloc(parent, &q);
1588 EXPORT_SYMBOL(d_alloc_name);
1590 void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
1592 WARN_ON_ONCE(dentry->d_op);
1593 WARN_ON_ONCE(dentry->d_flags & (DCACHE_OP_HASH |
1595 DCACHE_OP_REVALIDATE |
1596 DCACHE_OP_WEAK_REVALIDATE |
1597 DCACHE_OP_DELETE ));
1602 dentry->d_flags |= DCACHE_OP_HASH;
1604 dentry->d_flags |= DCACHE_OP_COMPARE;
1605 if (op->d_revalidate)
1606 dentry->d_flags |= DCACHE_OP_REVALIDATE;
1607 if (op->d_weak_revalidate)
1608 dentry->d_flags |= DCACHE_OP_WEAK_REVALIDATE;
1610 dentry->d_flags |= DCACHE_OP_DELETE;
1612 dentry->d_flags |= DCACHE_OP_PRUNE;
1615 EXPORT_SYMBOL(d_set_d_op);
1617 static unsigned d_flags_for_inode(struct inode *inode)
1619 unsigned add_flags = DCACHE_FILE_TYPE;
1622 return DCACHE_MISS_TYPE;
1624 if (S_ISDIR(inode->i_mode)) {
1625 add_flags = DCACHE_DIRECTORY_TYPE;
1626 if (unlikely(!(inode->i_opflags & IOP_LOOKUP))) {
1627 if (unlikely(!inode->i_op->lookup))
1628 add_flags = DCACHE_AUTODIR_TYPE;
1630 inode->i_opflags |= IOP_LOOKUP;
1632 } else if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) {
1633 if (unlikely(inode->i_op->follow_link))
1634 add_flags = DCACHE_SYMLINK_TYPE;
1636 inode->i_opflags |= IOP_NOFOLLOW;
1639 if (unlikely(IS_AUTOMOUNT(inode)))
1640 add_flags |= DCACHE_NEED_AUTOMOUNT;
1644 static void __d_instantiate(struct dentry *dentry, struct inode *inode)
1646 unsigned add_flags = d_flags_for_inode(inode);
1648 spin_lock(&dentry->d_lock);
1649 __d_set_type(dentry, add_flags);
1651 hlist_add_head(&dentry->d_alias, &inode->i_dentry);
1652 dentry->d_inode = inode;
1653 dentry_rcuwalk_barrier(dentry);
1654 spin_unlock(&dentry->d_lock);
1655 fsnotify_d_instantiate(dentry, inode);
1659 * d_instantiate - fill in inode information for a dentry
1660 * @entry: dentry to complete
1661 * @inode: inode to attach to this dentry
1663 * Fill in inode information in the entry.
1665 * This turns negative dentries into productive full members
1668 * NOTE! This assumes that the inode count has been incremented
1669 * (or otherwise set) by the caller to indicate that it is now
1670 * in use by the dcache.
1673 void d_instantiate(struct dentry *entry, struct inode * inode)
1675 BUG_ON(!hlist_unhashed(&entry->d_alias));
1677 spin_lock(&inode->i_lock);
1678 __d_instantiate(entry, inode);
1680 spin_unlock(&inode->i_lock);
1681 security_d_instantiate(entry, inode);
1683 EXPORT_SYMBOL(d_instantiate);
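/*
 * Illustrative sketch of the usual pattern in a filesystem ->create()
 * method: allocate a referenced inode, then hand that reference to the
 * dcache with d_instantiate(). examplefs_new_inode() is a hypothetical
 * helper returning a new inode or an ERR_PTR.
 */
static int examplefs_create(struct inode *dir, struct dentry *dentry,
			    umode_t mode, bool excl)
{
	struct inode *inode = examplefs_new_inode(dir->i_sb, mode);

	if (IS_ERR(inode))
		return PTR_ERR(inode);
	d_instantiate(dentry, inode);	/* dentry now owns the inode reference */
	return 0;
}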
1686 * d_instantiate_unique - instantiate a non-aliased dentry
1687 * @entry: dentry to instantiate
1688 * @inode: inode to attach to this dentry
1690 * Fill in inode information in the entry. On success, it returns NULL.
1691 * If an unhashed alias of "entry" already exists, then we return the
1692 * aliased dentry instead and drop one reference to inode.
1694 * Note that in order to avoid conflicts with rename() etc, the caller
1695 * had better be holding the parent directory semaphore.
1697 * This also assumes that the inode count has been incremented
1698 * (or otherwise set) by the caller to indicate that it is now
1699 * in use by the dcache.
1701 static struct dentry *__d_instantiate_unique(struct dentry *entry,
1702 struct inode *inode)
1704 struct dentry *alias;
1705 int len = entry->d_name.len;
1706 const char *name = entry->d_name.name;
1707 unsigned int hash = entry->d_name.hash;
1710 __d_instantiate(entry, NULL);
1714 hlist_for_each_entry(alias, &inode->i_dentry, d_alias) {
1716 * Don't need alias->d_lock here, because aliases with
1717 * d_parent == entry->d_parent are not subject to name or
1718 * parent changes, because the parent inode i_mutex is held.
1720 if (alias->d_name.hash != hash)
1722 if (alias->d_parent != entry->d_parent)
1724 if (alias->d_name.len != len)
1726 if (dentry_cmp(alias, name, len))
1732 __d_instantiate(entry, inode);
1736 struct dentry *d_instantiate_unique(struct dentry *entry, struct inode *inode)
1738 struct dentry *result;
1740 BUG_ON(!hlist_unhashed(&entry->d_alias));
1743 spin_lock(&inode->i_lock);
1744 result = __d_instantiate_unique(entry, inode);
1746 spin_unlock(&inode->i_lock);
1749 security_d_instantiate(entry, inode);
1753 BUG_ON(!d_unhashed(result));
1758 EXPORT_SYMBOL(d_instantiate_unique);
1761 * d_instantiate_no_diralias - instantiate a non-aliased dentry
1762 * @entry: dentry to complete
1763 * @inode: inode to attach to this dentry
1765 * Fill in inode information in the entry. If a directory alias is found, then
1766 * return an error (and drop inode). Together with d_materialise_unique() this
1767 * guarantees that a directory inode may never have more than one alias.
1769 int d_instantiate_no_diralias(struct dentry *entry, struct inode *inode)
1771 BUG_ON(!hlist_unhashed(&entry->d_alias));
1773 spin_lock(&inode->i_lock);
1774 if (S_ISDIR(inode->i_mode) && !hlist_empty(&inode->i_dentry)) {
1775 spin_unlock(&inode->i_lock);
1779 __d_instantiate(entry, inode);
1780 spin_unlock(&inode->i_lock);
1781 security_d_instantiate(entry, inode);
1785 EXPORT_SYMBOL(d_instantiate_no_diralias);
1787 struct dentry *d_make_root(struct inode *root_inode)
1789 struct dentry *res = NULL;
1792 static const struct qstr name = QSTR_INIT("/", 1);
1794 res = __d_alloc(root_inode->i_sb, &name);
1796 d_instantiate(res, root_inode);
1802 EXPORT_SYMBOL(d_make_root);
1804 static struct dentry * __d_find_any_alias(struct inode *inode)
1806 struct dentry *alias;
1808 if (hlist_empty(&inode->i_dentry))
1810 alias = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
1816 * d_find_any_alias - find any alias for a given inode
1817 * @inode: inode to find an alias for
1819 * If any aliases exist for the given inode, take and return a
1820 * reference for one of them. If no aliases exist, return %NULL.
1822 struct dentry *d_find_any_alias(struct inode *inode)
1826 spin_lock(&inode->i_lock);
1827 de = __d_find_any_alias(inode);
1828 spin_unlock(&inode->i_lock);
1831 EXPORT_SYMBOL(d_find_any_alias);
1834 * d_obtain_alias - find or allocate a dentry for a given inode
1835 * @inode: inode to allocate the dentry for
1837 * Obtain a dentry for an inode resulting from NFS filehandle conversion or
1838 * similar open by handle operations. The returned dentry may be anonymous,
1839 * or may have a full name (if the inode was already in the cache).
1841 * When called on a directory inode, we must ensure that the inode only ever
1842 * has one dentry. If a dentry is found, that is returned instead of
1843 * allocating a new one.
1845 * On successful return, the reference to the inode has been transferred
1846 * to the dentry. In case of an error the reference on the inode is released.
1847 * To make it easier to use in export operations a %NULL or IS_ERR inode may
1848 * be passed in and the error will be propagated to the return value,
1849 * with a %NULL @inode replaced by ERR_PTR(-ESTALE).
1851 struct dentry *d_obtain_alias(struct inode *inode)
1853 static const struct qstr anonstring = QSTR_INIT("/", 1);
1859 return ERR_PTR(-ESTALE);
1861 return ERR_CAST(inode);
1863 res = d_find_any_alias(inode);
1867 tmp = __d_alloc(inode->i_sb, &anonstring);
1869 res = ERR_PTR(-ENOMEM);
1873 spin_lock(&inode->i_lock);
1874 res = __d_find_any_alias(inode);
1876 spin_unlock(&inode->i_lock);
1881 /* attach a disconnected dentry */
1882 add_flags = d_flags_for_inode(inode) | DCACHE_DISCONNECTED;
1884 spin_lock(&tmp->d_lock);
1885 tmp->d_inode = inode;
1886 tmp->d_flags |= add_flags;
1887 hlist_add_head(&tmp->d_alias, &inode->i_dentry);
1888 hlist_bl_lock(&tmp->d_sb->s_anon);
1889 hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon);
1890 hlist_bl_unlock(&tmp->d_sb->s_anon);
1891 spin_unlock(&tmp->d_lock);
1892 spin_unlock(&inode->i_lock);
1893 security_d_instantiate(tmp, inode);
1898 if (res && !IS_ERR(res))
1899 security_d_instantiate(res, inode);
1903 EXPORT_SYMBOL(d_obtain_alias);
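/*
 * Illustrative sketch of an export_operations ->fh_to_dentry() style caller:
 * the inode lookup result can be handed to d_obtain_alias() directly, since
 * NULL and IS_ERR inodes are handled above. examplefs_iget() is a
 * hypothetical helper returning a referenced inode, NULL or an ERR_PTR.
 */
static struct dentry *examplefs_fh_to_dentry_helper(struct super_block *sb,
						    u64 ino)
{
	return d_obtain_alias(examplefs_iget(sb, ino));
}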
1906 * d_splice_alias - splice a disconnected dentry into the tree if one exists
1907 * @inode: the inode which may have a disconnected dentry
1908 * @dentry: a negative dentry which we want to point to the inode.
1910 * If inode is a directory and has a 'disconnected' dentry (i.e. IS_ROOT and
1911 * DCACHE_DISCONNECTED), then d_move that in place of the given dentry
1912 * and return it, else simply d_add the inode to the dentry and return NULL.
1914 * This is needed in the lookup routine of any filesystem that is exportable
1915 * (via knfsd) so that we can build dcache paths to directories effectively.
1917 * If a dentry was found and moved, then it is returned. Otherwise NULL
1918 * is returned. This matches the expected return value of ->lookup.
1920 * Cluster filesystems may call this function with a negative, hashed dentry.
1921 * In that case, we know that the inode will be a regular file, and also this
1922 * will only occur during atomic_open. So we need to check for the dentry
1923 * being already hashed only in the final case.
1925 struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
1927 struct dentry *new = NULL;
1930 return ERR_CAST(inode);
1932 if (inode && S_ISDIR(inode->i_mode)) {
1933 spin_lock(&inode->i_lock);
1934 new = __d_find_alias(inode, 1);
1936 BUG_ON(!(new->d_flags & DCACHE_DISCONNECTED));
1937 spin_unlock(&inode->i_lock);
1938 security_d_instantiate(new, inode);
1939 d_move(new, dentry);
1942 /* already taking inode->i_lock, so d_add() by hand */
1943 __d_instantiate(dentry, inode);
1944 spin_unlock(&inode->i_lock);
1945 security_d_instantiate(dentry, inode);
1949 d_instantiate(dentry, inode);
1950 if (d_unhashed(dentry))
1955 EXPORT_SYMBOL(d_splice_alias);
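/*
 * Illustrative sketch of the ->lookup() pattern the comment above describes
 * for exportable filesystems: return d_splice_alias() directly, letting it
 * reuse a disconnected directory alias when one exists.
 * examplefs_inode_by_name() is a hypothetical helper that returns a
 * referenced inode, NULL for a negative lookup, or an ERR_PTR on failure.
 */
static struct dentry *examplefs_lookup(struct inode *dir, struct dentry *dentry,
					unsigned int flags)
{
	struct inode *inode = examplefs_inode_by_name(dir, &dentry->d_name);

	return d_splice_alias(inode, dentry);
}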
1958 * d_add_ci - lookup or allocate new dentry with case-exact name
1959 * @inode: the inode case-insensitive lookup has found
1960 * @dentry: the negative dentry that was passed to the parent's lookup func
1961 * @name: the case-exact name to be associated with the returned dentry
1963 * This is to avoid filling the dcache with case-insensitive names to the
1964 * same inode; only the actual correct case is stored in the dcache for
1965 * case-insensitive filesystems.
1967 * For a case-insensitive lookup match, if the case-exact dentry
1968 * already exists in the dcache, use it and return it.
1970 * If no entry exists with the exact case name, allocate new dentry with
1971 * the exact case, and return the spliced entry.
1973 struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
1976 struct dentry *found;
1980 * First check if a dentry matching the name already exists;
1981 * if not, go ahead and create it now.
1983 found = d_hash_and_lookup(dentry->d_parent, name);
1984 if (unlikely(IS_ERR(found)))
1987 new = d_alloc(dentry->d_parent, name);
1989 found = ERR_PTR(-ENOMEM);
1993 found = d_splice_alias(inode, new);
2002 * If a matching dentry exists and it's not negative, use it.
2004 * Decrement the reference count to balance the iget() done
2007 if (found->d_inode) {
2008 if (unlikely(found->d_inode != inode)) {
2009 /* This can't happen because bad inodes are unhashed. */
2010 BUG_ON(!is_bad_inode(inode));
2011 BUG_ON(!is_bad_inode(found->d_inode));
2018 * Negative dentry: instantiate it unless the inode is a directory and
2019 * already has a dentry.
2021 new = d_splice_alias(inode, found);
2032 EXPORT_SYMBOL(d_add_ci);
2035 * Do the slow-case of the dentry name compare.
2037 * Unlike the dentry_cmp() function, we need to atomically
2038 * load the name and length information, so that the
2039 * filesystem can rely on them, and can use the 'name' and
2040 * 'len' information without worrying about walking off the
2041 * end of memory etc.
2043 * Thus the read_seqcount_retry() and the "duplicate" info
2044 * in arguments (the low-level filesystem should not look
2045 * at the dentry inode or name contents directly, since
2046 * rename can change them while we're in RCU mode).
2048 enum slow_d_compare {
2054 static noinline enum slow_d_compare slow_dentry_cmp(
2055 const struct dentry *parent,
2056 struct dentry *dentry,
2058 const struct qstr *name)
2060 int tlen = dentry->d_name.len;
2061 const char *tname = dentry->d_name.name;
2063 if (read_seqcount_retry(&dentry->d_seq, seq)) {
2065 return D_COMP_SEQRETRY;
2067 if (parent->d_op->d_compare(parent, dentry, tlen, tname, name))
2068 return D_COMP_NOMATCH;
2073 * __d_lookup_rcu - search for a dentry (racy, store-free)
2074 * @parent: parent dentry
2075 * @name: qstr of name we wish to find
2076 * @seqp: returns d_seq value at the point where the dentry was found
2077 * Returns: dentry, or NULL
2079 * __d_lookup_rcu is the dcache lookup function for rcu-walk name
2080 * resolution (store-free path walking) design described in
2081 * Documentation/filesystems/path-lookup.txt.
2083 * This is not to be used outside core vfs.
2085 * __d_lookup_rcu must only be used in rcu-walk mode, ie. with vfsmount lock
2086 * held, and rcu_read_lock held. The returned dentry must not be stored into
2087 * without taking d_lock and checking d_seq sequence count against @seq
2090 * A refcount may be taken on the found dentry with the d_rcu_to_refcount
2093 * Alternatively, __d_lookup_rcu may be called again to look up the child of
2094 * the returned dentry, so long as its parent's seqlock is checked after the
2095 * child is looked up. Thus, an interlocking stepping of sequence lock checks
2096 * is formed, giving integrity down the path walk.
2098 * NOTE! The caller *has* to check the resulting dentry against the sequence
2099 * number we've returned before using any of the resulting dentry state!
2101 struct dentry *__d_lookup_rcu(const struct dentry *parent,
2102 const struct qstr *name,
2105 u64 hashlen = name->hash_len;
2106 const unsigned char *str = name->name;
2107 struct hlist_bl_head *b = d_hash(parent, hashlen_hash(hashlen));
2108 struct hlist_bl_node *node;
2109 struct dentry *dentry;
2112 * Note: There is significant duplication with __d_lookup which is
2113 * required to prevent single threaded performance regressions
2114 * especially on architectures where smp_rmb (in seqcounts) are costly.
2115 * Keep the two functions in sync.
2119 * The hash list is protected using RCU.
2121 * Carefully use d_seq when comparing a candidate dentry, to avoid
2122 * races with d_move().
2124 * It is possible that concurrent renames can mess up our list
2125 * walk here and result in missing our dentry, resulting in the
2126 * false-negative result. d_lookup() protects against concurrent
2127 * renames using rename_lock seqlock.
2129 * See Documentation/filesystems/path-lookup.txt for more details.
2131 hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
2136 * The dentry sequence count protects us from concurrent
2137 * renames, and thus protects parent and name fields.
2139 * The caller must perform a seqcount check in order
2140 * to do anything useful with the returned dentry.
2142 * NOTE! We do a "raw" seqcount_begin here. That means that
2143 * we don't wait for the sequence count to stabilize if it
2144 * is in the middle of a sequence change. If we do the slow
2145 * dentry compare, we will do seqretries until it is stable,
2146 * and if we end up with a successful lookup, we actually
2147 * want to exit RCU lookup anyway.
2149 seq = raw_seqcount_begin(&dentry->d_seq);
2150 if (dentry->d_parent != parent)
2152 if (d_unhashed(dentry))
2155 if (unlikely(parent->d_flags & DCACHE_OP_COMPARE)) {
2156 if (dentry->d_name.hash != hashlen_hash(hashlen))
2159 switch (slow_dentry_cmp(parent, dentry, seq, name)) {
2162 case D_COMP_NOMATCH:
2169 if (dentry->d_name.hash_len != hashlen)
2172 if (!dentry_cmp(dentry, str, hashlen_len(hashlen)))
2179 * d_lookup - search for a dentry
2180 * @parent: parent dentry
2181 * @name: qstr of name we wish to find
2182 * Returns: dentry, or NULL
2184 * d_lookup searches the children of the parent dentry for the name in
2185 * question. If the dentry is found its reference count is incremented and the
2186 * dentry is returned. The caller must use dput to free the entry when it has
2187 * finished using it. %NULL is returned if the dentry does not exist.
2189 struct dentry *d_lookup(const struct dentry *parent, const struct qstr *name)
2191 struct dentry *dentry;
2195 seq = read_seqbegin(&rename_lock);
2196 dentry = __d_lookup(parent, name);
2199 } while (read_seqretry(&rename_lock, seq));
2202 EXPORT_SYMBOL(d_lookup);
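/*
 * Illustrative sketch: d_lookup() hands back a referenced dentry, so the
 * caller is responsible for the matching dput(). (example_name_is_cached is
 * a made-up helper used only for demonstration.)
 */
static inline bool example_name_is_cached(struct dentry *dir,
					  const struct qstr *name)
{
	struct dentry *found = d_lookup(dir, name);

	if (!found)
		return false;
	dput(found);
	return true;
}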
2205 * __d_lookup - search for a dentry (racy)
2206 * @parent: parent dentry
2207 * @name: qstr of name we wish to find
2208 * Returns: dentry, or NULL
2210 * __d_lookup is like d_lookup, however it may (rarely) return a
2211 * false-negative result due to unrelated rename activity.
2213 * __d_lookup is slightly faster by avoiding rename_lock read seqlock,
2214 * however it must be used carefully, eg. with a following d_lookup in
2215 * the case of failure.
2217 * __d_lookup callers must be commented.
2219 struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
2221 unsigned int len = name->len;
2222 unsigned int hash = name->hash;
2223 const unsigned char *str = name->name;
2224 struct hlist_bl_head *b = d_hash(parent, hash);
2225 struct hlist_bl_node *node;
2226 struct dentry *found = NULL;
2227 struct dentry *dentry;
2230 * Note: There is significant duplication with __d_lookup_rcu which is
2231 * required to prevent single threaded performance regressions
2232 * especially on architectures where smp_rmb (in seqcounts) are costly.
2233 * Keep the two functions in sync.
2237 * The hash list is protected using RCU.
2239 * Take d_lock when comparing a candidate dentry, to avoid races
2242 * It is possible that concurrent renames can mess up our list
2243 * walk here and result in missing our dentry, resulting in the
2244 * false-negative result. d_lookup() protects against concurrent
2245 * renames using rename_lock seqlock.
2247 * See Documentation/filesystems/path-lookup.txt for more details.
2251 hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
2253 if (dentry->d_name.hash != hash)
2256 spin_lock(&dentry->d_lock);
2257 if (dentry->d_parent != parent)
2259 if (d_unhashed(dentry))
2263 * It is safe to compare names since d_move() cannot
2264 * change the qstr (protected by d_lock).
2266 if (parent->d_flags & DCACHE_OP_COMPARE) {
2267 int tlen = dentry->d_name.len;
2268 const char *tname = dentry->d_name.name;
2269 if (parent->d_op->d_compare(parent, dentry, tlen, tname, name))
2272 if (dentry->d_name.len != len)
2274 if (dentry_cmp(dentry, str, len))
2278 dentry->d_lockref.count++;
2280 spin_unlock(&dentry->d_lock);
2283 spin_unlock(&dentry->d_lock);
2291 * d_hash_and_lookup - hash the qstr then search for a dentry
2292 * @dir: Directory to search in
2293 * @name: qstr of name we wish to find
2295 * On lookup failure NULL is returned; on bad name - ERR_PTR(-error)
2297 struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
2300 * Check for a fs-specific hash function. Note that we must
2301 * calculate the standard hash first, as the d_op->d_hash()
2302 * routine may choose to leave the hash value unchanged.
2304 name->hash = full_name_hash(name->name, name->len);
2305 if (dir->d_flags & DCACHE_OP_HASH) {
2306 int err = dir->d_op->d_hash(dir, name);
2307 if (unlikely(err < 0))
2308 return ERR_PTR(err);
2310 return d_lookup(dir, name);
2312 EXPORT_SYMBOL(d_hash_and_lookup);
2315 * d_validate - verify dentry provided from insecure source (deprecated)
2316 * @dentry: The dentry alleged to be valid child of @dparent
2317 * @dparent: The parent dentry (known to be valid)
2319 * An insecure source has sent us a dentry, here we verify it and dget() it.
2320 * This is used by ncpfs in its readdir implementation.
2321 * Zero is returned if the dentry is invalid.
2323 * This function is slow for big directories, and deprecated, do not use it.
2325 int d_validate(struct dentry *dentry, struct dentry *dparent)
2327 struct dentry *child;
2329 spin_lock(&dparent->d_lock);
2330 list_for_each_entry(child, &dparent->d_subdirs, d_u.d_child) {
2331 if (dentry == child) {
2332 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
2333 __dget_dlock(dentry);
2334 spin_unlock(&dentry->d_lock);
2335 spin_unlock(&dparent->d_lock);
2339 spin_unlock(&dparent->d_lock);
2343 EXPORT_SYMBOL(d_validate);
2346 * When a file is deleted, we have two options:
2347 * - turn this dentry into a negative dentry
2348 * - unhash this dentry and free it.
2350 * Usually, we want to just turn this into
2351 * a negative dentry, but if anybody else is
2352 * currently using the dentry or the inode
2353 * we can't do that and we fall back on removing
2354 * it from the hash queues and waiting for
2355 * it to be deleted later when it has no users
2359 * d_delete - delete a dentry
2360 * @dentry: The dentry to delete
2362 * Turn the dentry into a negative dentry if possible, otherwise
2363 * remove it from the hash queues so it can be deleted later
2366 void d_delete(struct dentry * dentry)
2368 struct inode *inode;
2371 * Are we the only user?
2374 spin_lock(&dentry->d_lock);
2375 inode = dentry->d_inode;
2376 isdir = S_ISDIR(inode->i_mode);
2377 if (dentry->d_lockref.count == 1) {
2378 if (!spin_trylock(&inode->i_lock)) {
2379 spin_unlock(&dentry->d_lock);
2383 dentry->d_flags &= ~DCACHE_CANT_MOUNT;
2384 dentry_unlink_inode(dentry);
2385 fsnotify_nameremove(dentry, isdir);
2389 if (!d_unhashed(dentry))
2392 spin_unlock(&dentry->d_lock);
2394 fsnotify_nameremove(dentry, isdir);
2396 EXPORT_SYMBOL(d_delete);
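/*
 * Illustrative sketch (simplified; assumption: roughly what a vfs_unlink()-
 * style caller does, with permission checks, locking and fsnotify omitted):
 * once the filesystem's ->unlink() succeeds, d_delete() lets the dcache turn
 * the name into a negative dentry, or unhash it if it is still in use.
 */
static int example_unlink(struct inode *dir, struct dentry *dentry)
{
	int error = dir->i_op->unlink(dir, dentry);

	if (!error)
		d_delete(dentry);
	return error;
}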
2398 static void __d_rehash(struct dentry * entry, struct hlist_bl_head *b)
2400 BUG_ON(!d_unhashed(entry));
2402 entry->d_flags |= DCACHE_RCUACCESS;
2403 hlist_bl_add_head_rcu(&entry->d_hash, b);
2407 static void _d_rehash(struct dentry * entry)
2409 __d_rehash(entry, d_hash(entry->d_parent, entry->d_name.hash));
2413 * d_rehash - add an entry back to the hash
2414 * @entry: dentry to add to the hash
2416 * Adds a dentry to the hash according to its name.
2419 void d_rehash(struct dentry * entry)
2421 spin_lock(&entry->d_lock);
2423 spin_unlock(&entry->d_lock);
2425 EXPORT_SYMBOL(d_rehash);
2428 * dentry_update_name_case - update case insensitive dentry with a new name
2429 * @dentry: dentry to be updated
2432 * Update a case-insensitive dentry with the new case of the name.
2434 * dentry must have been returned by d_lookup with name @name. Old and new
2435 * name lengths must match (i.e. no d_compare which allows mismatched name and length).
2438 * Parent inode i_mutex must be held over d_lookup and into this call (to
2439 * keep renames and concurrent inserts, and readdir(2) away).
2441 void dentry_update_name_case(struct dentry *dentry, struct qstr *name)
2443 BUG_ON(!mutex_is_locked(&dentry->d_parent->d_inode->i_mutex));
2444 BUG_ON(dentry->d_name.len != name->len); /* d_lookup gives this */
2446 spin_lock(&dentry->d_lock);
2447 write_seqcount_begin(&dentry->d_seq);
2448 memcpy((unsigned char *)dentry->d_name.name, name->name, name->len);
2449 write_seqcount_end(&dentry->d_seq);
2450 spin_unlock(&dentry->d_lock);
2452 EXPORT_SYMBOL(dentry_update_name_case);
2454 static void switch_names(struct dentry *dentry, struct dentry *target)
2456 if (dname_external(target)) {
2457 if (dname_external(dentry)) {
2459 * Both external: swap the pointers
2461 swap(target->d_name.name, dentry->d_name.name);
2464 * dentry:internal, target:external. Steal target's
2465 * storage and make target internal.
2467 memcpy(target->d_iname, dentry->d_name.name,
2468 dentry->d_name.len + 1);
2469 dentry->d_name.name = target->d_name.name;
2470 target->d_name.name = target->d_iname;
2473 if (dname_external(dentry)) {
2475 * dentry:external, target:internal. Give dentry's
2476 * storage to target and make dentry internal
2478 memcpy(dentry->d_iname, target->d_name.name,
2479 target->d_name.len + 1);
2480 target->d_name.name = dentry->d_name.name;
2481 dentry->d_name.name = dentry->d_iname;
2484 * Both are internal. Just copy target to dentry
2486 memcpy(dentry->d_iname, target->d_name.name,
2487 target->d_name.len + 1);
2488 dentry->d_name.len = target->d_name.len;
2492 swap(dentry->d_name.len, target->d_name.len);
2495 static void dentry_lock_for_move(struct dentry *dentry, struct dentry *target)
2498 * XXXX: do we really need to take target->d_lock?
2500 if (IS_ROOT(dentry) || dentry->d_parent == target->d_parent)
2501 spin_lock(&target->d_parent->d_lock);
2503 if (d_ancestor(dentry->d_parent, target->d_parent)) {
2504 spin_lock(&dentry->d_parent->d_lock);
2505 spin_lock_nested(&target->d_parent->d_lock,
2506 DENTRY_D_LOCK_NESTED);
2508 spin_lock(&target->d_parent->d_lock);
2509 spin_lock_nested(&dentry->d_parent->d_lock,
2510 DENTRY_D_LOCK_NESTED);
2513 if (target < dentry) {
2514 spin_lock_nested(&target->d_lock, 2);
2515 spin_lock_nested(&dentry->d_lock, 3);
2517 spin_lock_nested(&dentry->d_lock, 2);
2518 spin_lock_nested(&target->d_lock, 3);
2522 static void dentry_unlock_parents_for_move(struct dentry *dentry,
2523 struct dentry *target)
2525 if (target->d_parent != dentry->d_parent)
2526 spin_unlock(&dentry->d_parent->d_lock);
2527 if (target->d_parent != target)
2528 spin_unlock(&target->d_parent->d_lock);
2532 * When switching names, the actual string doesn't strictly have to
2533 * be preserved in the target - because we're dropping the target
2534 * anyway. As such, we can just do a simple memcpy() to copy over
2535 * the new name before we switch.
2537 * Note that we have to be a lot more careful about getting the hash
2538 * switched - we have to switch the hash value properly even if it
2539 * then no longer matches the actual (corrupted) string of the target.
2540 * The hash value has to match the hash queue that the dentry is on..
2543 * __d_move - move a dentry
2544 * @dentry: entry to move
2545 * @target: new dentry
2547 * Update the dcache to reflect the move of a file name. Negative
2548 * dcache entries should not be moved in this way. Caller must hold
2549 * rename_lock, the i_mutex of the source and target directories,
2550 * and the sb->s_vfs_rename_mutex if they differ. See lock_rename().
2552 static void __d_move(struct dentry * dentry, struct dentry * target)
2554 if (!dentry->d_inode)
2555 printk(KERN_WARNING "VFS: moving negative dcache entry\n");
2557 BUG_ON(d_ancestor(dentry, target));
2558 BUG_ON(d_ancestor(target, dentry));
2560 dentry_lock_for_move(dentry, target);
2562 write_seqcount_begin(&dentry->d_seq);
2563 write_seqcount_begin_nested(&target->d_seq, DENTRY_D_LOCK_NESTED);
2565 /* __d_drop does write_seqcount_barrier, but they're OK to nest. */
2568 * Move the dentry to the target hash queue. Don't bother checking
2569 * for the same hash queue because of how unlikely it is.
2572 __d_rehash(dentry, d_hash(target->d_parent, target->d_name.hash));
2574 /* Unhash the target: dput() will then get rid of it */
2577 list_del(&dentry->d_u.d_child);
2578 list_del(&target->d_u.d_child);
2580 /* Switch the names.. */
2581 switch_names(dentry, target);
2582 swap(dentry->d_name.hash, target->d_name.hash);
2584 /* ... and switch the parents */
2585 if (IS_ROOT(dentry)) {
2586 dentry->d_parent = target->d_parent;
2587 target->d_parent = target;
2588 INIT_LIST_HEAD(&target->d_u.d_child);
2590 swap(dentry->d_parent, target->d_parent);
2592 /* And add them back to the (new) parent lists */
2593 list_add(&target->d_u.d_child, &target->d_parent->d_subdirs);
2596 list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs);
2598 write_seqcount_end(&target->d_seq);
2599 write_seqcount_end(&dentry->d_seq);
2601 dentry_unlock_parents_for_move(dentry, target);
2602 spin_unlock(&target->d_lock);
2603 fsnotify_d_move(dentry);
2604 spin_unlock(&dentry->d_lock);
2608 * d_move - move a dentry
2609 * @dentry: entry to move
2610 * @target: new dentry
2612 * Update the dcache to reflect the move of a file name. Negative
2613 * dcache entries should not be moved in this way. See the locking
2614 * requirements for __d_move.
2616 void d_move(struct dentry *dentry, struct dentry *target)
2618 write_seqlock(&rename_lock);
2619 __d_move(dentry, target);
2620 write_sequnlock(&rename_lock);
2622 EXPORT_SYMBOL(d_move);
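/*
 * Illustrative sketch (simplified; assumption: roughly what the VFS rename
 * path does, with lock_rename(), permission checks and error handling
 * omitted): once the filesystem's ->rename() succeeds, d_move() updates the
 * dcache so the old dentry now hangs off the new parent under the new name.
 */
static int example_rename(struct inode *old_dir, struct dentry *old_dentry,
			  struct inode *new_dir, struct dentry *new_dentry)
{
	int error = old_dir->i_op->rename(old_dir, old_dentry,
					  new_dir, new_dentry);

	if (!error)
		d_move(old_dentry, new_dentry);
	return error;
}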
2625 * d_ancestor - search for an ancestor
2626 * @p1: ancestor dentry
2629 * Returns the ancestor dentry of p2 which is a child of p1, if p1 is
2630 * an ancestor of p2, else NULL.
2632 struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2)
2636 for (p = p2; !IS_ROOT(p); p = p->d_parent) {
2637 if (p->d_parent == p1)
2644 * This helper attempts to cope with remotely renamed directories
2646 * It assumes that the caller is already holding
2647 * dentry->d_parent->d_inode->i_mutex, inode->i_lock and rename_lock
2649 * Note: If ever the locking in lock_rename() changes, then please
2650 * remember to update this too...
2652 static struct dentry *__d_unalias(struct inode *inode,
2653 struct dentry *dentry, struct dentry *alias)
2655 struct mutex *m1 = NULL, *m2 = NULL;
2656 struct dentry *ret = ERR_PTR(-EBUSY);
2658 /* If alias and dentry share a parent, then no extra locks required */
2659 if (alias->d_parent == dentry->d_parent)
2662 /* See lock_rename() */
2663 if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex))
2665 m1 = &dentry->d_sb->s_vfs_rename_mutex;
2666 if (!mutex_trylock(&alias->d_parent->d_inode->i_mutex))
2668 m2 = &alias->d_parent->d_inode->i_mutex;
2670 if (likely(!d_mountpoint(alias))) {
2671 __d_move(alias, dentry);
2675 spin_unlock(&inode->i_lock);
2684 * Prepare an anonymous dentry for life in the superblock's dentry tree as a
2685 * named dentry in place of the dentry to be replaced.
2686 * returns with anon->d_lock held!
2688 static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon)
2690 struct dentry *dparent;
2692 dentry_lock_for_move(anon, dentry);
2694 write_seqcount_begin(&dentry->d_seq);
2695 write_seqcount_begin_nested(&anon->d_seq, DENTRY_D_LOCK_NESTED);
2697 dparent = dentry->d_parent;
2699 switch_names(dentry, anon);
2700 swap(dentry->d_name.hash, anon->d_name.hash);
2702 dentry->d_parent = dentry;
2703 list_del_init(&dentry->d_u.d_child);
2704 anon->d_parent = dparent;
2705 list_move(&anon->d_u.d_child, &dparent->d_subdirs);
2707 write_seqcount_end(&dentry->d_seq);
2708 write_seqcount_end(&anon->d_seq);
2710 dentry_unlock_parents_for_move(anon, dentry);
2711 spin_unlock(&dentry->d_lock);
2713 /* anon->d_lock still locked, returns locked */
2717 * d_materialise_unique - introduce an inode into the tree
2718 * @dentry: candidate dentry
2719 * @inode: inode to bind to the dentry, to which aliases may be attached
2721 * Introduces a dentry into the tree, substituting an extant disconnected
2722 * root directory alias in its place if there is one. Caller must hold the
2723 * i_mutex of the parent directory.
2725 struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode)
2727 struct dentry *actual;
2729 BUG_ON(!d_unhashed(dentry));
2733 __d_instantiate(dentry, NULL);
2738 spin_lock(&inode->i_lock);
2740 if (S_ISDIR(inode->i_mode)) {
2741 struct dentry *alias;
2743 /* Does an aliased dentry already exist? */
2744 alias = __d_find_alias(inode, 0);
2747 write_seqlock(&rename_lock);
2749 if (d_ancestor(alias, dentry)) {
2750 /* Check for loops */
2751 actual = ERR_PTR(-ELOOP);
2752 spin_unlock(&inode->i_lock);
2753 } else if (IS_ROOT(alias)) {
2754 /* Is this an anonymous mountpoint that we
2755 * could splice into our tree? */
2756 __d_materialise_dentry(dentry, alias);
2757 write_sequnlock(&rename_lock);
2761 /* Nope, but we must(!) avoid directory
2762 * aliasing. This drops inode->i_lock */
2763 actual = __d_unalias(inode, dentry, alias);
2765 write_sequnlock(&rename_lock);
2766 if (IS_ERR(actual)) {
2767 if (PTR_ERR(actual) == -ELOOP)
2768 pr_warn_ratelimited(
2769 "VFS: Lookup of '%s' in %s %s"
2770 " would have caused loop\n",
2771 dentry->d_name.name,
2772 inode->i_sb->s_type->name,
2780 /* Add a unique reference */
2781 actual = __d_instantiate_unique(dentry, inode);
2785 BUG_ON(!d_unhashed(actual));
2787 spin_lock(&actual->d_lock);
2790 spin_unlock(&actual->d_lock);
2791 spin_unlock(&inode->i_lock);
2793 if (actual == dentry) {
2794 security_d_instantiate(dentry, inode);
2801 EXPORT_SYMBOL_GPL(d_materialise_unique);
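/*
 * Illustrative sketch (hypothetical ->lookup() method, not part of this
 * file; example_iget() stands in for whatever obtains the inode): network
 * and similar filesystems that can discover directory aliases typically end
 * their lookup by handing the inode to d_materialise_unique() and returning
 * whatever dentry it chose (NULL means "use the dentry passed in").
 */
static struct dentry *example_lookup(struct inode *dir, struct dentry *dentry,
				     unsigned int flags)
{
	struct inode *inode = example_iget(dir, &dentry->d_name); /* hypothetical helper */

	if (IS_ERR(inode))
		return ERR_CAST(inode);
	return d_materialise_unique(dentry, inode);
}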
2803 static int prepend(char **buffer, int *buflen, const char *str, int namelen)
2807 return -ENAMETOOLONG;
2809 memcpy(*buffer, str, namelen);
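/*
 * Illustrative sketch (not part of this file): how prepend() is meant to be
 * used.  Paths are assembled from the end of the buffer towards the start,
 * so the terminating NUL goes in first and every component is pushed in
 * front of what is already there (return values ignored for brevity).
 */
static void example_prepend_usage(char *buf, int buflen)
{
	char *p = buf + buflen;			/* one past the end of the buffer */
	int len = buflen;

	prepend(&p, &len, "\0", 1);		/* ""            */
	prepend(&p, &len, "passwd", 6);		/* "passwd"      */
	prepend(&p, &len, "/", 1);		/* "/passwd"     */
	prepend(&p, &len, "etc", 3);		/* "etc/passwd"  */
	prepend(&p, &len, "/", 1);		/* "/etc/passwd" */
	/* p now points at the NUL-terminated string "/etc/passwd" */
}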
2814 * prepend_name - prepend a pathname in front of current buffer pointer
2815 * @buffer: buffer pointer
2816 * @buflen: allocated length of the buffer
2817 * @name: name string and length qstr structure
2819 * With RCU path tracing, it may race with d_move(). Use ACCESS_ONCE() to
2820 * make sure that either the old or the new name pointer and length are
2821 * fetched. However, there may be a mismatch between the length and the pointer.
2822 * The length cannot be trusted; we need to copy the name byte by byte until
2823 * the length is reached or a null byte is found. It also prepends "/" at
2824 * the beginning of the name. The sequence number check at the caller will
2825 * retry it again when a d_move() does happen. So any garbage in the buffer
2826 * due to mismatched pointer and length will be discarded.
2828 static int prepend_name(char **buffer, int *buflen, struct qstr *name)
2830 const char *dname = ACCESS_ONCE(name->name);
2831 u32 dlen = ACCESS_ONCE(name->len);
2834 *buflen -= dlen + 1;
2836 return -ENAMETOOLONG;
2837 p = *buffer -= dlen + 1;
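/*
 * Illustrative sketch (assumption: the remainder of prepend_name() is not
 * shown above; this standalone version spells out the copy loop the comment
 * describes): the '/' goes in first, then at most dlen bytes of the name are
 * copied, stopping early on a NUL because the racily read length and pointer
 * may not belong together.
 */
static int example_prepend_name(char **buffer, int *buflen,
				const struct qstr *name)
{
	const char *dname = name->name;
	u32 dlen = name->len;
	char *p;

	*buflen -= dlen + 1;
	if (*buflen < 0)
		return -ENAMETOOLONG;
	p = *buffer -= dlen + 1;
	*p++ = '/';
	while (dlen--) {
		char c = *dname++;

		if (!c)
			break;
		*p++ = c;
	}
	return 0;
}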
2849 * prepend_path - Prepend path string to a buffer
2850 * @path: the dentry/vfsmount to report
2851 * @root: root vfsmnt/dentry
2852 * @buffer: pointer to the end of the buffer
2853 * @buflen: pointer to buffer length
2855 * The function will first try to write out the pathname without taking any
2856 * lock other than the RCU read lock to make sure that dentries won't go away.
2857 * It only checks the sequence number of the global rename_lock as any change
2858 * in the dentry's d_seq will be preceded by changes in the rename_lock
2859 * sequence number. If the sequence number had been changed, it will restart
2860 * the whole pathname back-tracing sequence again by taking the rename_lock.
2861 * In this case, there is no need to take the RCU read lock as the recursive
2862 * parent pointer references will keep the dentry chain alive as long as no
2863 * rename operation is performed.
2865 static int prepend_path(const struct path *path,
2866 const struct path *root,
2867 char **buffer, int *buflen)
2869 struct dentry *dentry;
2870 struct vfsmount *vfsmnt;
2873 unsigned seq, m_seq = 0;
2879 read_seqbegin_or_lock(&mount_lock, &m_seq);
2886 dentry = path->dentry;
2888 mnt = real_mount(vfsmnt);
2889 read_seqbegin_or_lock(&rename_lock, &seq);
2890 while (dentry != root->dentry || vfsmnt != root->mnt) {
2891 struct dentry * parent;
2893 if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
2894 struct mount *parent = ACCESS_ONCE(mnt->mnt_parent);
2896 if (mnt != parent) {
2897 dentry = ACCESS_ONCE(mnt->mnt_mountpoint);
2903 * Filesystems needing to implement special "root names"
2904 * should do so with ->d_dname()
2906 if (IS_ROOT(dentry) &&
2907 (dentry->d_name.len != 1 ||
2908 dentry->d_name.name[0] != '/')) {
2909 WARN(1, "Root dentry has weird name <%.*s>\n",
2910 (int) dentry->d_name.len,
2911 dentry->d_name.name);
2914 error = is_mounted(vfsmnt) ? 1 : 2;
2917 parent = dentry->d_parent;
2919 error = prepend_name(&bptr, &blen, &dentry->d_name);
2927 if (need_seqretry(&rename_lock, seq)) {
2931 done_seqretry(&rename_lock, seq);
2935 if (need_seqretry(&mount_lock, m_seq)) {
2939 done_seqretry(&mount_lock, m_seq);
2941 if (error >= 0 && bptr == *buffer) {
2943 error = -ENAMETOOLONG;
2953 * __d_path - return the path of a dentry
2954 * @path: the dentry/vfsmount to report
2955 * @root: root vfsmnt/dentry
2956 * @buf: buffer to return value in
2957 * @buflen: buffer length
2959 * Convert a dentry into an ASCII path name.
2961 * Returns a pointer into the buffer or an error code if the
2962 * path was too long.
2964 * "buflen" should be positive.
2966 * If the path is not reachable from the supplied root, return %NULL.
2968 char *__d_path(const struct path *path,
2969 const struct path *root,
2970 char *buf, int buflen)
2972 char *res = buf + buflen;
2975 prepend(&res, &buflen, "\0", 1);
2976 error = prepend_path(path, root, &res, &buflen);
2979 return ERR_PTR(error);
2985 char *d_absolute_path(const struct path *path,
2986 char *buf, int buflen)
2988 struct path root = {};
2989 char *res = buf + buflen;
2992 prepend(&res, &buflen, "\0", 1);
2993 error = prepend_path(path, &root, &res, &buflen);
2998 return ERR_PTR(error);
3003 * same as __d_path but appends "(deleted)" for unlinked files.
3005 static int path_with_deleted(const struct path *path,
3006 const struct path *root,
3007 char **buf, int *buflen)
3009 prepend(buf, buflen, "\0", 1);
3010 if (d_unlinked(path->dentry)) {
3011 int error = prepend(buf, buflen, " (deleted)", 10);
3016 return prepend_path(path, root, buf, buflen);
3019 static int prepend_unreachable(char **buffer, int *buflen)
3021 return prepend(buffer, buflen, "(unreachable)", 13);
3024 static void get_fs_root_rcu(struct fs_struct *fs, struct path *root)
3029 seq = read_seqcount_begin(&fs->seq);
3031 } while (read_seqcount_retry(&fs->seq, seq));
3035 * d_path - return the path of a dentry
3036 * @path: path to report
3037 * @buf: buffer to return value in
3038 * @buflen: buffer length
3040 * Convert a dentry into an ASCII path name. If the entry has been deleted
3041 * the string " (deleted)" is appended. Note that this is ambiguous.
3043 * Returns a pointer into the buffer or an error code if the path was
3044 * too long. Note: Callers should use the returned pointer, not the passed
3045 * in buffer, to use the name! The implementation often starts at an offset
3046 * into the buffer, and may leave 0 bytes at the start.
3048 * "buflen" should be positive.
3050 char *d_path(const struct path *path, char *buf, int buflen)
3052 char *res = buf + buflen;
3057 * We have various synthetic filesystems that never get mounted. On
3058 * these filesystems dentries are never used for lookup purposes, and
3059 * thus don't need to be hashed. They also don't need a name until a
3060 * user wants to identify the object in /proc/pid/fd/. The little hack
3061 * below allows us to generate a name for these objects on demand:
3063 * Some pseudo inodes are mountable. When they are mounted
3064 * path->dentry == path->mnt->mnt_root. In that case don't call d_dname
3065 * and instead have d_path return the mounted path.
3067 if (path->dentry->d_op && path->dentry->d_op->d_dname &&
3068 (!IS_ROOT(path->dentry) || path->dentry != path->mnt->mnt_root))
3069 return path->dentry->d_op->d_dname(path->dentry, buf, buflen);
3072 get_fs_root_rcu(current->fs, &root);
3073 error = path_with_deleted(path, &root, &res, &buflen);
3077 res = ERR_PTR(error);
3080 EXPORT_SYMBOL(d_path);
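/*
 * Illustrative sketch (hypothetical caller, not part of this file): typical
 * in-kernel use of d_path().  Note that the name is read via the returned
 * pointer, which normally points somewhere inside the buffer rather than at
 * its start.
 */
static void example_print_path(const struct path *path)
{
	char *buf = (char *)__get_free_page(GFP_KERNEL);
	char *name;

	if (!buf)
		return;
	name = d_path(path, buf, PAGE_SIZE);
	if (!IS_ERR(name))
		printk(KERN_INFO "path: %s\n", name);
	free_page((unsigned long)buf);
}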
3083 * Helper function for dentry_operations.d_dname() members
3085 char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen,
3086 const char *fmt, ...)
3092 va_start(args, fmt);
3093 sz = vsnprintf(temp, sizeof(temp), fmt, args) + 1;
3096 if (sz > sizeof(temp) || sz > buflen)
3097 return ERR_PTR(-ENAMETOOLONG);
3099 buffer += buflen - sz;
3100 return memcpy(buffer, temp, sz);
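/*
 * Illustrative sketch (hypothetical ->d_dname() callback, modelled on how
 * pseudo filesystems such as pipefs use this helper): generate a synthetic
 * "example:[ino]" name on demand instead of keeping the dentry hashed.
 */
static char *example_dname(struct dentry *dentry, char *buffer, int buflen)
{
	return dynamic_dname(dentry, buffer, buflen, "example:[%lu]",
				dentry->d_inode->i_ino);
}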
3103 char *simple_dname(struct dentry *dentry, char *buffer, int buflen)
3105 char *end = buffer + buflen;
3106 /* these dentries are never renamed, so d_lock is not needed */
3107 if (prepend(&end, &buflen, " (deleted)", 11) ||
3108 prepend(&end, &buflen, dentry->d_name.name, dentry->d_name.len) ||
3109 prepend(&end, &buflen, "/", 1))
3110 end = ERR_PTR(-ENAMETOOLONG);
3115 * Write full pathname from the root of the filesystem into the buffer.
3117 static char *__dentry_path(struct dentry *d, char *buf, int buflen)
3119 struct dentry *dentry;
3132 prepend(&end, &len, "\0", 1);
3136 read_seqbegin_or_lock(&rename_lock, &seq);
3137 while (!IS_ROOT(dentry)) {
3138 struct dentry *parent = dentry->d_parent;
3141 error = prepend_name(&end, &len, &dentry->d_name);
3150 if (need_seqretry(&rename_lock, seq)) {
3154 done_seqretry(&rename_lock, seq);
3159 return ERR_PTR(-ENAMETOOLONG);
3162 char *dentry_path_raw(struct dentry *dentry, char *buf, int buflen)
3164 return __dentry_path(dentry, buf, buflen);
3166 EXPORT_SYMBOL(dentry_path_raw);
3168 char *dentry_path(struct dentry *dentry, char *buf, int buflen)
3173 if (d_unlinked(dentry)) {
3175 if (prepend(&p, &buflen, "//deleted", 10) != 0)
3179 retval = __dentry_path(dentry, buf, buflen);
3180 if (!IS_ERR(retval) && p)
3181 *p = '/'; /* restore '/' overridden with '\0' */
3184 return ERR_PTR(-ENAMETOOLONG);
3187 static void get_fs_root_and_pwd_rcu(struct fs_struct *fs, struct path *root,
3193 seq = read_seqcount_begin(&fs->seq);
3196 } while (read_seqcount_retry(&fs->seq, seq));
3200 * NOTE! The user-level library version returns a
3201 * character pointer. The kernel system call just
3202 * returns the length of the buffer filled (which
3203 * includes the ending '\0' character), or a negative
3204 * error value. So libc would do something like
3206 * char *getcwd(char * buf, size_t size)
3210 * retval = sys_getcwd(buf, size);
3217 SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
3220 struct path pwd, root;
3221 char *page = __getname();
3227 get_fs_root_and_pwd_rcu(current->fs, &root, &pwd);
3230 if (!d_unlinked(pwd.dentry)) {
3232 char *cwd = page + PATH_MAX;
3233 int buflen = PATH_MAX;
3235 prepend(&cwd, &buflen, "\0", 1);
3236 error = prepend_path(&pwd, &root, &cwd, &buflen);
3242 /* Unreachable from current root */
3244 error = prepend_unreachable(&cwd, &buflen);
3250 len = PATH_MAX + page - cwd;
3253 if (copy_to_user(buf, cwd, len))
3266 * Test whether new_dentry is a subdirectory of old_dentry.
3268 * Trivially implemented using the dcache structure
3272 * is_subdir - is new dentry a subdirectory of old_dentry
3273 * @new_dentry: new dentry
3274 * @old_dentry: old dentry
3276 * Returns 1 if new_dentry is a subdirectory of old_dentry (at any depth).
3277 * Returns 0 otherwise.
3278 * Caller must ensure that "new_dentry" is pinned before calling is_subdir()
3281 int is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
3286 if (new_dentry == old_dentry)
3290 /* for restarting inner loop in case of seq retry */
3291 seq = read_seqbegin(&rename_lock);
3293 * Need rcu_read_lock() to protect against d_parent changing due to d_move()
3297 if (d_ancestor(old_dentry, new_dentry))
3302 } while (read_seqretry(&rename_lock, seq));
3307 static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
3309 struct dentry *root = data;
3310 if (dentry != root) {
3311 if (d_unhashed(dentry) || !dentry->d_inode)
3314 if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
3315 dentry->d_flags |= DCACHE_GENOCIDE;
3316 dentry->d_lockref.count--;
3319 return D_WALK_CONTINUE;
3322 void d_genocide(struct dentry *parent)
3324 d_walk(parent, parent, d_genocide_kill, NULL);
3327 void d_tmpfile(struct dentry *dentry, struct inode *inode)
3329 inode_dec_link_count(inode);
3330 BUG_ON(dentry->d_name.name != dentry->d_iname ||
3331 !hlist_unhashed(&dentry->d_alias) ||
3332 !d_unlinked(dentry));
3333 spin_lock(&dentry->d_parent->d_lock);
3334 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
3335 dentry->d_name.len = sprintf(dentry->d_iname, "#%llu",
3336 (unsigned long long)inode->i_ino);
3337 spin_unlock(&dentry->d_lock);
3338 spin_unlock(&dentry->d_parent->d_lock);
3339 d_instantiate(dentry, inode);
3341 EXPORT_SYMBOL(d_tmpfile);
3343 static __initdata unsigned long dhash_entries;
3344 static int __init set_dhash_entries(char *str)
3348 dhash_entries = simple_strtoul(str, &str, 0);
3351 __setup("dhash_entries=", set_dhash_entries);
3353 static void __init dcache_init_early(void)
3357 /* If hashes are distributed across NUMA nodes, defer
3358 * hash allocation until vmalloc space is available.
3364 alloc_large_system_hash("Dentry cache",
3365 sizeof(struct hlist_bl_head),
3374 for (loop = 0; loop < (1U << d_hash_shift); loop++)
3375 INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
3378 static void __init dcache_init(void)
3383 * A constructor could be added for stable state like the lists,
3384 * but it is probably not worth it because of the cache nature of the dcache.
3387 dentry_cache = KMEM_CACHE(dentry,
3388 SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD);
3390 /* Hash may have been set up in dcache_init_early */
3395 alloc_large_system_hash("Dentry cache",
3396 sizeof(struct hlist_bl_head),
3405 for (loop = 0; loop < (1U << d_hash_shift); loop++)
3406 INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
3409 /* SLAB cache for __getname() consumers */
3410 struct kmem_cache *names_cachep __read_mostly;
3411 EXPORT_SYMBOL(names_cachep);
3413 EXPORT_SYMBOL(d_genocide);
3415 void __init vfs_caches_init_early(void)
3417 dcache_init_early();
3421 void __init vfs_caches_init(unsigned long mempages)
3423 unsigned long reserve;
3425 /* Base hash sizes on available memory, with a reserve equal to
3426 150% of current kernel size */
3428 reserve = min((mempages - nr_free_pages()) * 3/2, mempages - 1);
3429 mempages -= reserve;
3431 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
3432 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
3436 files_init(mempages);