if (iocb->ki_pos < 0)
return -EINVAL;
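+ /* writes need freeze protection held for the whole retry loop */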
+ if (opcode == IOCB_CMD_PWRITEV)
+ file_start_write(file);
do {
ret = rw_op(iocb, &iocb->ki_iovec[iocb->ki_cur_seg],
iocb->ki_nr_segs - iocb->ki_cur_seg,
iocb->ki_pos);
} while (ret > 0 && iocb->ki_left > 0 &&
(opcode == IOCB_CMD_PWRITEV ||
(!S_ISFIFO(inode->i_mode) && !S_ISSOCK(inode->i_mode))));
+ if (opcode == IOCB_CMD_PWRITEV)
+ file_end_write(file);
/* This means we must have transferred all that we could */
/* No need to retry anymore */
size_t count, ocount;
bool sync = (file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host);
- sb_start_write(inode->i_sb);
-
mutex_lock(&inode->i_mutex);
err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
if (sync)
atomic_dec(&BTRFS_I(inode)->sync_writers);
out:
- sb_end_write(inode->i_sb);
current->backing_dev_info = NULL;
return num_written ? num_written : err;
}
BUG_ON(iocb->ki_pos != pos);
- sb_start_write(inode->i_sb);
-
/*
* We need to hold the sem to be sure nobody modifies lock list
* with a brlock that prevents writing.
*/
}
up_read(&cinode->lock_sem);
- sb_end_write(inode->i_sb);
return rc;
}
fnv = file->f_op->aio_write;
}
- if (fnv)
+ if (fnv) {
+ file_start_write(file);
ret = do_sync_readv_writev(file, iov, nr_segs, tot_len,
pos, fnv);
- else
+ file_end_write(file);
+ } else
ret = do_loop_readv_writev(file, iov, nr_segs, pos, fn);
out:
return err;
count = ocount;
- sb_start_write(inode->i_sb);
mutex_lock(&inode->i_mutex);
/* We can write back this queue in page reclaim */
out:
current->backing_dev_info = NULL;
mutex_unlock(&inode->i_mutex);
- sb_end_write(inode->i_sb);
return written ? written : err;
}
BUG_ON(iocb->ki_pos != pos);
- sb_start_write(inode->i_sb);
mutex_lock(&inode->i_mutex);
ret = ntfs_file_aio_write_nolock(iocb, iov, nr_segs, &iocb->ki_pos);
mutex_unlock(&inode->i_mutex);
if (err < 0)
ret = err;
}
- sb_end_write(inode->i_sb);
return ret;
}
if (iocb->ki_left == 0)
return 0;
- sb_start_write(inode->i_sb);
-
appending = file->f_flags & O_APPEND ? 1 : 0;
direct_io = file->f_flags & O_DIRECT ? 1 : 0;
ocfs2_iocb_clear_sem_locked(iocb);
mutex_unlock(&inode->i_mutex);
- sb_end_write(inode->i_sb);
if (written)
ret = written;
struct kiocb kiocb;
ssize_t ret;
+ file_start_write(filp);
init_sync_kiocb(&kiocb, filp);
kiocb.ki_pos = *ppos;
kiocb.ki_left = len;
if (-EIOCBQUEUED == ret)
ret = wait_on_sync_kiocb(&kiocb);
*ppos = kiocb.ki_pos;
+ file_end_write(filp);
return ret;
}
fnv = file->f_op->aio_write;
}
- if (fnv)
+ if (fnv) {
+ file_start_write(file);
ret = do_sync_readv_writev(file, iov, nr_segs, tot_len,
pos, fnv);
- else
+ file_end_write(file);
+ } else
ret = do_loop_readv_writev(file, iov, nr_segs, pos, fn);
out:
if (ocount == 0)
return 0;
- sb_start_write(inode->i_sb);
-
if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
ret = -EIO;
goto out;
}
out:
- sb_end_write(inode->i_sb);
return ret;
}
return f->f_inode;
}
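+/*
+ * file_{start,end}_write() wrap __sb_start_write()/__sb_end_write() at the
+ * SB_FREEZE_WRITE level, giving the caller freeze protection on the file's
+ * superblock for the duration of a write; both are no-ops for anything
+ * other than regular files.
+ */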
+static inline void file_start_write(struct file *file)
+{
+ if (!S_ISREG(file_inode(file)->i_mode))
+ return;
+ __sb_start_write(file_inode(file)->i_sb, SB_FREEZE_WRITE, true);
+}
+
+static inline void file_end_write(struct file *file)
+{
+ if (!S_ISREG(file_inode(file)->i_mode))
+ return;
+ __sb_end_write(file_inode(file)->i_sb, SB_FREEZE_WRITE);
+}
+
/*
* get_write_access() gets write permission for a file.
* put_write_access() releases this write permission.
BUG_ON(iocb->ki_pos != pos);
- sb_start_write(inode->i_sb);
mutex_lock(&inode->i_mutex);
ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
mutex_unlock(&inode->i_mutex);
if (err < 0 && ret > 0)
ret = err;
}
- sb_end_write(inode->i_sb);
return ret;
}
EXPORT_SYMBOL(generic_file_aio_write);