/* Unaligned direct AIO must be serialized; see comment above */
if (unaligned_aio) {
- static unsigned long unaligned_warn_time;
-
- /* Warn about this once per day */
- if (printk_timed_ratelimit(&unaligned_warn_time, 60*60*24*HZ))
- ext4_msg(inode->i_sb, KERN_WARNING,
- "Unaligned AIO/DIO on inode %ld by %s; "
- "performance will be poor.",
- inode->i_ino, current->comm);
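+	/* Serialize: take the per-inode AIO mutex, then wait for any
+	 * unwritten-extent conversions already in flight to finish. */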
mutex_lock(ext4_aio_mutex(inode));
ext4_unwritten_wait(inode);
}
* page cache has data or not.
*/
static int ext4_find_unwritten_pgoff(struct inode *inode,
- int origin,
+ int whence,
struct ext4_map_blocks *map,
loff_t *offset)
{
nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
(pgoff_t)num);
if (nr_pages == 0) {
- if (origin == SEEK_DATA)
+ if (whence == SEEK_DATA)
break;
- BUG_ON(origin != SEEK_HOLE);
+ BUG_ON(whence != SEEK_HOLE);
/*
* If this is the first iteration of the loop and the
* offset is smaller than the first page offset, it will be a
* hole at this offset.
*/
- if (lastoff == startoff && origin == SEEK_HOLE &&
+ if (lastoff == startoff && whence == SEEK_HOLE &&
lastoff < page_offset(pvec.pages[0])) {
found = 1;
break;
* If the current offset is not beyond the end of given
* range, it will be a hole.
*/
- if (lastoff < endoff && origin == SEEK_HOLE &&
+ if (lastoff < endoff && whence == SEEK_HOLE &&
page->index > end) {
found = 1;
*offset = lastoff;
do {
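+		/* The search treats uptodate or unwritten buffers as data
+		 * at this offset; anything else reads back as a hole. */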
if (buffer_uptodate(bh) ||
buffer_unwritten(bh)) {
- if (origin == SEEK_DATA)
+ if (whence == SEEK_DATA)
found = 1;
} else {
- if (origin == SEEK_HOLE)
+ if (whence == SEEK_HOLE)
found = 1;
}
if (found) {
* Fewer pages were found than requested, so there must be a
* hole in the remaining range.
*/
- if (nr_pages < num && origin == SEEK_HOLE) {
+ if (nr_pages < num && whence == SEEK_HOLE) {
found = 1;
*offset = lastoff;
break;
* by calling generic_file_llseek_size() with the appropriate maxbytes
* value for each.
*/
-loff_t ext4_llseek(struct file *file, loff_t offset, int origin)
+loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
{
struct inode *inode = file->f_mapping->host;
loff_t maxbytes;
else
maxbytes = inode->i_sb->s_maxbytes;
- switch (origin) {
+ switch (whence) {
case SEEK_SET:
case SEEK_CUR:
case SEEK_END:
- return generic_file_llseek_size(file, offset, origin,
+ return generic_file_llseek_size(file, offset, whence,
maxbytes, i_size_read(inode));
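+	/* SEEK_DATA and SEEK_HOLE are handled by ext4-specific helpers
+	 * that also consult the page cache for unwritten extents. */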
case SEEK_DATA:
return ext4_seek_data(file, offset, maxbytes);
if (!new_transaction)
goto alloc_transaction;
write_lock(&journal->j_state_lock);
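+	/*
+	 * Only install the new transaction if no journal barrier is in
+	 * effect: a non-zero j_barrier_count means jbd2_journal_lock_updates()
+	 * has locked the journal and no new transaction may be started.
+	 */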
- if (!journal->j_running_transaction) {
+ if (!journal->j_running_transaction &&
+ !journal->j_barrier_count) {
jbd2_get_transaction(journal, new_transaction);
new_transaction = NULL;
}
goto not_jbd;
}
- /* keep track of wether or not this transaction modified us */
+ /* keep track of whether or not this transaction modified us */
was_modified = jh->b_modified;
/*
BUFFER_TRACE(bh, "entry");
- retry:
/*
* It is safe to proceed here without the j_list_lock because the
* buffers cannot be stolen by try_to_free_buffers as long as we are
* for commit and try again.
*/
if (partial_page) {
- tid_t tid = journal->j_committing_transaction->t_tid;
-
jbd2_journal_put_journal_head(jh);
spin_unlock(&journal->j_list_lock);
jbd_unlock_bh_state(bh);
write_unlock(&journal->j_state_lock);
- jbd2_log_wait_commit(journal, tid);
- goto retry;
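+		/*
+		 * Waiting for the commit here could deadlock: the caller
+		 * holds the page lock, which the committing transaction may
+		 * itself need. Return -EBUSY and let the caller drop its
+		 * locks, wait for the commit, and retry.
+		 */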
+ return -EBUSY;
}
/*
* OK, buffer won't be reachable after truncate. We just set
* @page: page to flush
* @offset: length of page to invalidate.
*
- * Reap page buffers containing data after offset in page.
- *
+ * Reap page buffers containing data after offset in page. Can return -EBUSY
+ * if buffers are part of the committing transaction and the page straddles
+ * i_size. The caller then has to wait for the current commit and try again.
*/
- void jbd2_journal_invalidatepage(journal_t *journal,
- struct page *page,
- unsigned long offset)
+ int jbd2_journal_invalidatepage(journal_t *journal,
+ struct page *page,
+ unsigned long offset)
{
struct buffer_head *head, *bh, *next;
unsigned int curr_off = 0;
int may_free = 1;
+ int ret = 0;
if (!PageLocked(page))
BUG();
if (!page_has_buffers(page))
- return;
+ return 0;
/* We will potentially be playing with lists other than just the
* data lists (especially for journaled data mode), so be
if (offset <= curr_off) {
/* This block is wholly outside the truncation point */
lock_buffer(bh);
- may_free &= journal_unmap_buffer(journal, bh,
- offset > 0);
+ ret = journal_unmap_buffer(journal, bh, offset > 0);
unlock_buffer(bh);
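+			/* journal_unmap_buffer() may now return -EBUSY;
+			 * propagate it so the caller can wait for the
+			 * committing transaction and retry. */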
+ if (ret < 0)
+ return ret;
+ may_free &= ret;
}
curr_off = next_off;
bh = next;
if (may_free && try_to_free_buffers(page))
J_ASSERT(!page_has_buffers(page));
}
+ return 0;
}
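
With the new return value, a caller must be ready for -EBUSY: drop the page
lock, wait for the committing transaction, and retry. A minimal sketch of such
a retry loop, assuming the usual jbd2 headers (the function name is
hypothetical; the real ext4 call sites are shaped differently):

#include <linux/jbd2.h>
#include <linux/pagemap.h>

/* Illustrative caller, entered with the page locked. */
static void example_invalidate_retry(journal_t *journal, struct page *page,
				     unsigned long offset)
{
	while (jbd2_journal_invalidatepage(journal, page, offset) == -EBUSY) {
		tid_t commit_tid = 0;

		/* Sample the committing transaction under j_state_lock. */
		read_lock(&journal->j_state_lock);
		if (journal->j_committing_transaction)
			commit_tid = journal->j_committing_transaction->t_tid;
		read_unlock(&journal->j_state_lock);

		/* Drop the page lock before sleeping, then wait for the
		 * commit to finish and try the invalidation again. */
		unlock_page(page);
		if (commit_tid)
			jbd2_log_wait_commit(journal, commit_tid);
		lock_page(page);
	}
}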
/*