}
static ssize_t
-ext4_file_dio_write(struct kiocb *iocb, const struct iovec *iov,
- unsigned long nr_segs, loff_t pos)
+ext4_file_write(struct kiocb *iocb, const struct iovec *iov,
+ unsigned long nr_segs, loff_t pos)
{
struct file *file = iocb->ki_filp;
- struct inode *inode = file->f_mapping->host;
+ struct inode *inode = file_inode(iocb->ki_filp);
struct blk_plug plug;
int unaligned_aio = 0;
- ssize_t ret;
int overwrite = 0;
size_t length = iov_length(iov, nr_segs);
-
- if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) &&
- !is_sync_kiocb(iocb))
- unaligned_aio = ext4_unaligned_aio(inode, iov, nr_segs, pos);
-
- /* Unaligned direct AIO must be serialized; see comment above */
- if (unaligned_aio) {
- mutex_lock(ext4_aio_mutex(inode));
- ext4_unwritten_wait(inode);
- }
-
- mutex_lock(&inode->i_mutex);
- blk_start_plug(&plug);
-
- iocb->private = &overwrite;
-
- /* check whether we do a DIO overwrite or not */
- if (ext4_should_dioread_nolock(inode) && !unaligned_aio &&
- !file->f_mapping->nrpages && pos + length <= i_size_read(inode)) {
- struct ext4_map_blocks map;
- unsigned int blkbits = inode->i_blkbits;
- int err, len;
-
- map.m_lblk = pos >> blkbits;
- map.m_len = (EXT4_BLOCK_ALIGN(pos + length, blkbits) >> blkbits)
- - map.m_lblk;
- len = map.m_len;
-
- err = ext4_map_blocks(NULL, inode, &map, 0);
- /*
- * 'err==len' means that all of blocks has been preallocated no
- * matter they are initialized or not. For excluding
- * unwritten extents, we need to check m_flags. There are
- * two conditions that indicate for initialized extents.
- * 1) If we hit extent cache, EXT4_MAP_MAPPED flag is returned;
- * 2) If we do a real lookup, non-flags are returned.
- * So we should check these two conditions.
- */
- if (err == len && (map.m_flags & EXT4_MAP_MAPPED))
- overwrite = 1;
- }
-
- ret = __generic_file_aio_write(iocb, iov, nr_segs);
- mutex_unlock(&inode->i_mutex);
-
- if (ret > 0) {
- ssize_t err;
-
- err = generic_write_sync(file, iocb->ki_pos - ret, ret);
- if (err < 0)
- ret = err;
- }
- blk_finish_plug(&plug);
-
- if (unaligned_aio)
- mutex_unlock(ext4_aio_mutex(inode));
-
- return ret;
-}
-
-static ssize_t
-ext4_file_write(struct kiocb *iocb, const struct iovec *iov,
- unsigned long nr_segs, loff_t pos)
-{
- struct file *file = iocb->ki_filp;
- struct inode *inode = file_inode(iocb->ki_filp);
ssize_t ret;
BUG_ON(iocb->ki_pos != pos);
if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
- size_t length = iov_length(iov, nr_segs);
if ((pos > sbi->s_bitmap_maxbytes ||
    (pos == sbi->s_bitmap_maxbytes && length > 0)))
	return -EFBIG;

if (pos + length > sbi->s_bitmap_maxbytes) {
	nr_segs = iov_shorten((struct iovec *)iov,
			      nr_segs, sbi->s_bitmap_maxbytes - pos);
}
}
- if (unlikely(iocb->ki_filp->f_flags & O_DIRECT))
- ret = ext4_file_dio_write(iocb, iov, nr_segs, pos);
- else {
+ if (unlikely(iocb->ki_filp->f_flags & O_DIRECT)) {
+ if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) &&
+ !is_sync_kiocb(iocb))
+ unaligned_aio = ext4_unaligned_aio(inode, iov,
+ nr_segs, pos);
+
+ /* Unaligned direct AIO must be serialized; see comment above */
+ if (unaligned_aio) {
+ mutex_lock(ext4_aio_mutex(inode));
+ ext4_unwritten_wait(inode);
+ }
+
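+ /*
+ * Take i_mutex to serialize against other writers on this inode and
+ * start a block plug so the requests submitted below can be batched.
+ */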
+ mutex_lock(&inode->i_mutex);
+ blk_start_plug(&plug);
+
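+ /* Hand the overwrite flag to ext4's direct I/O path via iocb->private */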
+ iocb->private = &overwrite;
+
+ /* Check whether we are doing a DIO overwrite */
+ if (ext4_should_dioread_nolock(inode) && !unaligned_aio &&
+ !file->f_mapping->nrpages && pos + length <= i_size_read(inode)) {
+ struct ext4_map_blocks map;
+ unsigned int blkbits = inode->i_blkbits;
+ int err, len;
+
+ map.m_lblk = pos >> blkbits;
+ map.m_len = (EXT4_BLOCK_ALIGN(pos + length, blkbits) >> blkbits)
+ - map.m_lblk;
+ len = map.m_len;
+
+ err = ext4_map_blocks(NULL, inode, &map, 0);
+ /*
+ * 'err == len' means that all of the blocks have been
+ * preallocated, whether or not they are initialized.  To
+ * exclude unwritten extents, we also need to check m_flags.
+ * There are two cases that indicate an initialized extent:
+ * 1) if we hit the extent cache, the EXT4_MAP_MAPPED flag is
+ * returned; 2) if we do a real lookup, no flags are returned.
+ * So we check both conditions before treating the write as
+ * an overwrite.
+ */
+ if (err == len && (map.m_flags & EXT4_MAP_MAPPED))
+ overwrite = 1;
+ }
+
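+ /* For O_DIRECT the generic path ends up calling ->direct_IO */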
+ ret = __generic_file_aio_write(iocb, iov, nr_segs);
+ mutex_unlock(&inode->i_mutex);
+
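+ /* Sync the written range if O_SYNC/O_DSYNC semantics require it */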
+ if (ret > 0) {
+ ssize_t err;
+
+ err = generic_write_sync(file, iocb->ki_pos - ret, ret);
+ if (err < 0)
+ ret = err;
+ }
+ blk_finish_plug(&plug);
+
+ if (unaligned_aio)
+ mutex_unlock(ext4_aio_mutex(inode));
+ } else {
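+ /* Buffered write: no plug or overwrite check needed, just i_mutex */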
mutex_lock(&inode->i_mutex);
ret = __generic_file_aio_write(iocb, iov, nr_segs);
mutex_unlock(&inode->i_mutex);