#include <linux/pagevec.h>
#include <linux/writeback.h>
+
+/*
+ * Dedicated wait queues for ioend completion, hashed by inode address.
+ * Prime number of hash buckets since address is used as the key.
+ */
+#define NVSYNC 37
+#define to_ioend_wq(v) (&xfs_ioend_wq[((unsigned long)v) % NVSYNC])
+static wait_queue_head_t xfs_ioend_wq[NVSYNC];
+
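+/*
+ * Initialise all ioend wait queue buckets at module init time.
+ */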
+void __init
+xfs_ioend_init(void)
+{
+ int i;
+
+ for (i = 0; i < NVSYNC; i++)
+ init_waitqueue_head(&xfs_ioend_wq[i]);
+}
+
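+/*
+ * Wait until all pending I/O completions on the inode have drained,
+ * i.e. until i_iocount drops to zero.
+ */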
+void
+xfs_ioend_wait(
+ xfs_inode_t *ip)
+{
+ wait_queue_head_t *wq = to_ioend_wq(ip);
+
+ wait_event(*wq, (atomic_read(&ip->i_iocount) == 0));
+}
+
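+/*
+ * Drop an I/O reference on the inode and wake any xfs_ioend_wait()
+ * callers once the count reaches zero.
+ */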
+STATIC void
+xfs_ioend_wake(
+ xfs_inode_t *ip)
+{
+ if (atomic_dec_and_test(&ip->i_iocount))
+ wake_up(to_ioend_wq(ip));
+}
+
STATIC void
xfs_count_page_state(
struct page *page,
__FILE__, __LINE__);
}
- vn_iowake(ip);
+ xfs_ioend_wake(ip);
mempool_free(ioend, xfs_ioend_pool);
}
unlock_buffer(bh);
} while ((bh = next_bh) != NULL);
- vn_iowake(XFS_I(ioend->io_inode));
+ xfs_ioend_wake(XFS_I(ioend->io_inode));
mempool_free(ioend, xfs_ioend_pool);
} while ((ioend = next) != NULL);
}
extern const struct address_space_operations xfs_address_space_operations;
extern int xfs_get_blocks(struct inode *, sector_t, struct buffer_head *, int);
+extern void xfs_ioend_init(void);
+extern void xfs_ioend_wait(struct xfs_inode *);
+
#endif /* __XFS_AOPS_H__ */
XFS_BUILD_OPTIONS " enabled\n");
ktrace_init(64);
- vn_init();
+ xfs_ioend_init();
xfs_dir_startup();
error = xfs_init_zones();
lock_flags |= XFS_IOLOCK_SHARED;
error = xfs_flush_pages(ip, 0, -1, fflag, FI_NONE);
if (flags & SYNC_IOWAIT)
- vn_iowait(ip);
+ xfs_ioend_wait(ip);
}
xfs_ilock(ip, XFS_ILOCK_SHARED);
#include "xfs_mount.h"
-/*
- * Dedicated vnode inactive/reclaim sync wait queues.
- * Prime number of hash buckets since address is used as the key.
- */
-#define NVSYNC 37
-#define vptosync(v) (&vsync[((unsigned long)v) % NVSYNC])
-static wait_queue_head_t vsync[NVSYNC];
-
-void __init
-vn_init(void)
-{
- int i;
-
- for (i = 0; i < NVSYNC; i++)
- init_waitqueue_head(&vsync[i]);
-}
-
-void
-vn_iowait(
- xfs_inode_t *ip)
-{
- wait_queue_head_t *wq = vptosync(ip);
-
- wait_event(*wq, (atomic_read(&ip->i_iocount) == 0));
-}
-
-void
-vn_iowake(
- xfs_inode_t *ip)
-{
- if (atomic_dec_and_test(&ip->i_iocount))
- wake_up(vptosync(ip));
-}
-
#ifdef XFS_INODE_TRACE
#define KTRACE_ENTER(ip, vk, s, line, ra) \
Prevent VM access to the pages until
the operation completes. */
-
-extern void vn_init(void);
-
-/*
- * Yeah, these don't take vnode anymore at all, all this should be
- * cleaned up at some point.
- */
-extern void vn_iowait(struct xfs_inode *ip);
-extern void vn_iowake(struct xfs_inode *ip);
-
#define IHOLD(ip) \
do { \
ASSERT(atomic_read(&VFS_I(ip)->i_count) > 0) ; \
* direct I/O with the truncate operation. Also, because we hold
* the IOLOCK in exclusive mode, we prevent new direct I/Os from being
* started until the truncate completes and drops the lock. Essentially,
- * the vn_iowait() call forms an I/O barrier that provides strict ordering
- * between direct I/Os and the truncate operation.
+ * the xfs_ioend_wait() call forms an I/O barrier that provides strict
+ * ordering between direct I/Os and the truncate operation.
*
* The flags parameter can have either the value XFS_ITRUNC_DEFINITE
* or XFS_ITRUNC_MAYBE. The XFS_ITRUNC_MAYBE value should be used
/* wait for the completion of any pending DIOs */
if (new_size == 0 || new_size < ip->i_size)
- vn_iowait(ip);
+ xfs_ioend_wait(ip);
/*
* Call toss_pages or flushinval_pages to get rid of pages
}
/* wait for all I/O to complete */
- vn_iowait(ip);
+ xfs_ioend_wait(ip);
if (!code)
code = xfs_itruncate_data(ip, iattr->ia_size);
return 0;
}
- vn_iowait(ip);
+ xfs_ioend_wait(ip);
ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) || ip->i_delayed_blks == 0);
need_iolock = 0;
if (need_iolock) {
xfs_ilock(ip, XFS_IOLOCK_EXCL);
- vn_iowait(ip); /* wait for the completion of any pending DIOs */
+ /* wait for the completion of any pending DIOs */
+ xfs_ioend_wait(ip);
}
rounding = max_t(uint, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE);