2 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
19 #include "xfs_vnodeops.h"
20 #include "xfs_bmap_btree.h"
21 #include "xfs_inode.h"
/*
 * Monotonically increasing generation counter handed out to each new
 * vnode (see vn_initialize, which skips the value zero).  All updates
 * are serialized by vnumber_lock.
 */
23 uint64_t vn_generation;	/* vnode generation number */
24 DEFINE_SPINLOCK(vnumber_lock);
27 * Dedicated vnode inactive/reclaim sync semaphores.
28 * Prime number of hash buckets since address is used as the key.
/*
 * Hash a vnode pointer into its wait-queue bucket.  NVSYNC is defined
 * on a line not visible in this excerpt — presumably a prime constant,
 * per the comment above.
 */
31 #define vptosync(v) (&vsync[((unsigned long)v) % NVSYNC])
32 static wait_queue_head_t vsync[NVSYNC];
/* One-time setup (enclosing init function's header is outside this
 * excerpt): initialize every per-bucket wait queue head. */
39 for (i = 0; i < NVSYNC; i++)
40 init_waitqueue_head(&vsync[i]);
/*
 * Body of vn_iowait (header lines missing from this excerpt): sleep on
 * the vnode's hashed wait queue until all in-flight I/O has drained,
 * i.e. until v_iocount reaches zero.  The matching wake_up is issued
 * by vn_iowake below.
 */
47 wait_queue_head_t *wq = vptosync(vp);
49 wait_event(*wq, (atomic_read(&vp->v_iocount) == 0));
/*
 * Body of vn_iowake (header lines missing from this excerpt): drop one
 * I/O reference; when the count hits zero, wake any vn_iowait() sleeper
 * parked on this vnode's wait-queue bucket.
 */
56 if (atomic_dec_and_test(&vp->v_iocount))
57 wake_up(vptosync(vp));
61 * Volume managers supporting multiple paths can send back ENODEV when the
62 * final path disappears. In this case continuing to fill the page cache
63 * with dirty data which cannot be written out is evil, so prevent that.
/* Body of vn_ioerror (header lines missing from this excerpt). */
72 bhv_vfs_t *vfsp = vfs_from_sb(vp->v_inode.i_sb);
/* Loss of the underlying device forces a filesystem shutdown; f and l
 * are presumably the caller's file/line for diagnostics — not visible
 * here, confirm against the full signature. */
74 if (unlikely(error == -ENODEV))
75 bhv_vfs_force_shutdown(vfsp, SHUTDOWN_DEVICE_REQ, f, l);
/* Body of vn_initialize (header lines missing from this excerpt):
 * set up a freshly allocated vnode from its Linux inode. */
82 bhv_vnode_t *vp = vn_from_inode(inode);
84 XFS_STATS_INC(vn_active);
85 XFS_STATS_INC(vn_alloc);
/*
 * Assign a unique, non-zero generation number under vnumber_lock.
 * The wrap-around handling after the `if` (original line 89) is
 * missing from this excerpt — presumably it bumps vn_generation
 * past zero again.
 */
87 spin_lock(&vnumber_lock);
88 if (!++vn_generation) /* v_number shouldn't be zero */
90 vp->v_number = vn_generation;
91 spin_unlock(&vnumber_lock);
93 ASSERT(VN_CACHED(vp) == 0);
/* Fresh vnode: no I/O in flight yet (see vn_iowait/vn_iowake). */
95 atomic_set(&vp->v_iocount, 0);
97 #ifdef XFS_VNODE_TRACE
98 vp->v_trace = ktrace_alloc(VNODE_TRACE_SIZE, KM_SLEEP);
99 #endif /* XFS_VNODE_TRACE */
101 vn_trace_exit(vp, __FUNCTION__, (inst_t *)__return_address);
106 * Revalidate the Linux inode from the vattr.
107 * Note: i_size _not_ updated; we must hold the inode
108 * semaphore when doing that - callers responsibility.
/* Function signature (original lines 109-114) missing from this
 * excerpt; body copies the generic stat fields from the vattr into
 * the Linux inode, then mirrors the XFS extended flags onto the
 * corresponding S_* inode flags. */
115 struct inode *inode = vn_to_inode(vp);
117 inode->i_mode = vap->va_mode;
118 inode->i_nlink = vap->va_nlink;
119 inode->i_uid = vap->va_uid;
120 inode->i_gid = vap->va_gid;
121 inode->i_blocks = vap->va_nblocks;
122 inode->i_mtime = vap->va_mtime;
123 inode->i_ctime = vap->va_ctime;
/* For each xflag: set or clear the matching inode flag.  The `else`
 * lines (originals 126, 130, 134, 138) are missing from this excerpt;
 * the &= lines below are presumably their bodies. */
124 if (vap->va_xflags & XFS_XFLAG_IMMUTABLE)
125 inode->i_flags |= S_IMMUTABLE;
127 inode->i_flags &= ~S_IMMUTABLE;
128 if (vap->va_xflags & XFS_XFLAG_APPEND)
129 inode->i_flags |= S_APPEND;
131 inode->i_flags &= ~S_APPEND;
132 if (vap->va_xflags & XFS_XFLAG_SYNC)
133 inode->i_flags |= S_SYNC;
135 inode->i_flags &= ~S_SYNC;
136 if (vap->va_xflags & XFS_XFLAG_NOATIME)
137 inode->i_flags |= S_NOATIME;
139 inode->i_flags &= ~S_NOATIME;
143 * Revalidate the Linux inode from the vnode.
/* Body of __vn_revalidate (header missing from this excerpt):
 * fetch current attributes from XFS and, on success, push them into
 * the Linux inode and clear the modified flag. */
152 vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);
153 vattr->va_mask = XFS_AT_STAT | XFS_AT_XFLAGS;
154 error = xfs_getattr(xfs_vtoi(vp), vattr, 0);
155 if (likely(!error)) {
156 vn_revalidate_core(vp, vattr);
157 xfs_iflags_clear(xfs_vtoi(vp), XFS_IMODIFIED);
/* Tail of vn_revalidate (its header and the local vattr declaration,
 * originals ~160-167, are missing): convenience wrapper around
 * __vn_revalidate with a stack vattr. */
168 return __vn_revalidate(vp, &vattr);
172 * Add a reference to a referenced vnode.
/* Body of vn_hold (header missing from this excerpt): bump the stats
 * counter and take an extra reference on the backing Linux inode via
 * igrab(); the lines using `inode` afterwards are not visible here. */
180 XFS_STATS_INC(vn_hold);
182 inode = igrab(vn_to_inode(vp));
188 #ifdef XFS_VNODE_TRACE
/*
 * Record one vnode-trace event in the vnode's ktrace ring buffer.
 * Slots: 0=event id, 1=caller string (func/file), 2=line, 3=current
 * vnode refcount, 4=caller-supplied return address, 6=cpu, 7=pid,
 * 8=our return address.  Slot 5 (original line 197) is missing from
 * this excerpt — confirm against the full source before relying on
 * the slot layout.
 */
190 #define KTRACE_ENTER(vp, vk, s, line, ra) \
191 ktrace_enter( (vp)->v_trace, \
192 /* 0 */ (void *)(__psint_t)(vk), \
193 /* 1 */ (void *)(s), \
194 /* 2 */ (void *)(__psint_t) line, \
195 /* 3 */ (void *)(__psint_t)(vn_count(vp)), \
196 /* 4 */ (void *)(ra), \
198 /* 6 */ (void *)(__psint_t)current_cpu(), \
199 /* 7 */ (void *)(__psint_t)current_pid(), \
200 /* 8 */ (void *)__return_address, \
201 /* 9 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL)
204 * Vnode tracing code.
/* The return-type lines and braces of each vn_trace_* function
 * (and any attributes between them) are missing from this excerpt;
 * each body is a single KTRACE_ENTER call tagging the event kind. */
/* Trace entry into a vnode operation. */
207 vn_trace_entry(bhv_vnode_t *vp, const char *func, inst_t *ra)
209 KTRACE_ENTER(vp, VNODE_KTRACE_ENTRY, func, 0, ra);
/* Trace exit from a vnode operation. */
213 vn_trace_exit(bhv_vnode_t *vp, const char *func, inst_t *ra)
215 KTRACE_ENTER(vp, VNODE_KTRACE_EXIT, func, 0, ra);
/* Trace taking a hold on a vnode (records file/line of the caller). */
219 vn_trace_hold(bhv_vnode_t *vp, char *file, int line, inst_t *ra)
221 KTRACE_ENTER(vp, VNODE_KTRACE_HOLD, file, line, ra);
/* Trace taking a reference on a vnode. */
225 vn_trace_ref(bhv_vnode_t *vp, char *file, int line, inst_t *ra)
227 KTRACE_ENTER(vp, VNODE_KTRACE_REF, file, line, ra);
/* Trace releasing a reference on a vnode. */
231 vn_trace_rele(bhv_vnode_t *vp, char *file, int line, inst_t *ra)
233 KTRACE_ENTER(vp, VNODE_KTRACE_RELE, file, line, ra);
235 #endif /* XFS_VNODE_TRACE */