struct xfs_mount *mp,
struct inode *inode)
{
- int error;
- uint32_t nlink = inode->i_nlink;
- uint32_t generation = inode->i_generation;
- uint64_t version = inode_peek_iversion(inode);
- umode_t mode = inode->i_mode;
- dev_t dev = inode->i_rdev;
- kuid_t uid = inode->i_uid;
- kgid_t gid = inode->i_gid;
+ int error;
+ uint32_t nlink = inode->i_nlink;
+ uint32_t generation = inode->i_generation;
+ uint64_t version = inode_peek_iversion(inode);
+ umode_t mode = inode->i_mode;
+ dev_t dev = inode->i_rdev;
+ kuid_t uid = inode->i_uid;
+ kgid_t gid = inode->i_gid;
error = inode_init_always(mp->m_super, inode);
@@ ... @@
}
/*
+ * Carefully nudge an inode whose VFS state has been torn down back into a
+ * usable state. Drops the i_flags_lock and the rcu read lock.
+ */
+static int
+xfs_iget_recycle(
+ struct xfs_perag *pag,
+ struct xfs_inode *ip) __releases(&ip->i_flags_lock)
+{
+ struct xfs_mount *mp = ip->i_mount;
+ struct inode *inode = VFS_I(ip);
+ int error;
+
+ trace_xfs_iget_recycle(ip);
+
+ /*
+ * We need to make it look like the inode is being reclaimed to prevent
+ * the actual reclaim workers from stomping over us while we recycle
+ * the inode. We can't clear the radix tree tag yet as it requires
+ * pag_ici_lock to be held exclusive.
+ */
+ ip->i_flags |= XFS_IRECLAIM;
+
+ spin_unlock(&ip->i_flags_lock);
+ rcu_read_unlock();
+
+ ASSERT(!rwsem_is_locked(&inode->i_rwsem));
+ error = xfs_reinit_inode(mp, inode);
+ if (error) {
+ bool wake;
+
+ /*
+ * Re-initializing the inode failed, and we are in deep
+ * trouble. Try to re-add it to the reclaim list.
+ */
+ rcu_read_lock();
+ spin_lock(&ip->i_flags_lock);
+ wake = !!__xfs_iflags_test(ip, XFS_INEW);
+ ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
+ if (wake)
+ wake_up_bit(&ip->i_flags, __XFS_INEW_BIT);
+ ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
+ spin_unlock(&ip->i_flags_lock);
+ rcu_read_unlock();
+
+ trace_xfs_iget_recycle_fail(ip);
+ return error;
+ }
+
+ spin_lock(&pag->pag_ici_lock);
+ spin_lock(&ip->i_flags_lock);
+
+ /*
+ * Clear the per-lifetime state in the inode as we are now effectively
+ * a new inode and need to return to the initial state before reuse
+ * occurs.
+ */
+ ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
+ ip->i_flags |= XFS_INEW;
+ xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
+ XFS_ICI_RECLAIM_TAG);
+ inode->i_state = I_NEW;
+ spin_unlock(&ip->i_flags_lock);
+ spin_unlock(&pag->pag_ici_lock);
+
+ return 0;
+}
+
+/*
* If we are allocating a new inode, then check what was returned is
* actually a free, empty inode. If we are not allocating an inode,
* then check we didn't find a free inode.
@@ ... @@
/*
* If we are racing with another cache hit that is currently
* instantiating this inode or currently recycling it out of
- * reclaimabe state, wait for the initialisation to complete
+ * reclaimable state, wait for the initialisation to complete
* before continuing.
*
* XXX(hch): eventually we should do something equivalent to
@@ ... @@
if (error)
goto out_error;
- /*
- * If IRECLAIMABLE is set, we've torn down the VFS inode already.
- * Need to carefully get it back into useable state.
- */
if (ip->i_flags & XFS_IRECLAIMABLE) {
- trace_xfs_iget_reclaim(ip);
-
if (flags & XFS_IGET_INCORE) {
error = -EAGAIN;
goto out_error;
}
- /*
- * We need to set XFS_IRECLAIM to prevent xfs_reclaim_inode
- * from stomping over us while we recycle the inode. We can't
- * clear the radix tree reclaimable tag yet as it requires
- * pag_ici_lock to be held exclusive.
- */
- ip->i_flags |= XFS_IRECLAIM;
-
- spin_unlock(&ip->i_flags_lock);
- rcu_read_unlock();
-
- ASSERT(!rwsem_is_locked(&inode->i_rwsem));
- error = xfs_reinit_inode(mp, inode);
- if (error) {
- bool wake;
- /*
- * Re-initializing the inode failed, and we are in deep
- * trouble. Try to re-add it to the reclaim list.
- */
- rcu_read_lock();
- spin_lock(&ip->i_flags_lock);
- wake = !!__xfs_iflags_test(ip, XFS_INEW);
- ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
- if (wake)
- wake_up_bit(&ip->i_flags, __XFS_INEW_BIT);
- ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
- trace_xfs_iget_reclaim_fail(ip);
- goto out_error;
- }
-
- spin_lock(&pag->pag_ici_lock);
- spin_lock(&ip->i_flags_lock);
-
- /*
- * Clear the per-lifetime state in the inode as we are now
- * effectively a new inode and need to return to the initial
- * state before reuse occurs.
- */
- ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
- ip->i_flags |= XFS_INEW;
- xfs_perag_clear_inode_tag(pag,
- XFS_INO_TO_AGINO(pag->pag_mount, ino),
- XFS_ICI_RECLAIM_TAG);
- inode->i_state = I_NEW;
- spin_unlock(&ip->i_flags_lock);
- spin_unlock(&pag->pag_ici_lock);
+ /* Drops i_flags_lock and RCU read lock. */
+ error = xfs_iget_recycle(pag, ip);
+ if (error)
+ return error;
} else {
/* If the VFS inode is being torn down, pause and try again. */
if (!igrab(inode)) {
@@ ... @@
return error;
}
-
static int
xfs_iget_cache_miss(
struct xfs_mount *mp,