Merge tag 'ext4_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso...
author Linus Torvalds <torvalds@linux-foundation.org>
Wed, 2 Jan 2013 17:57:34 +0000 (09:57 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Wed, 2 Jan 2013 17:57:34 +0000 (09:57 -0800)
Pull ext4 bug fixes from Ted Ts'o:
 "Various bug fixes for ext4.  Perhaps the most serious bug fixed is one
  which could cause file system corruptions when performing file punch
  operations."

* tag 'ext4_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4:
  ext4: avoid hang when mounting non-journal filesystems with orphan list
  ext4: lock i_mutex when truncating orphan inodes
  ext4: do not try to write superblock on ro remount w/o journal
  ext4: include journal blocks in df overhead calcs
  ext4: remove unaligned AIO warning printk
  ext4: fix an incorrect comment about i_mutex
  ext4: fix deadlock in journal_unmap_buffer()
  ext4: split off ext4_journalled_invalidatepage()
  jbd2: fix assertion failure in jbd2_journal_flush()
  ext4: check dioread_nolock on remount
  ext4: fix extent tree corruption caused by hole punch

1  2 
fs/ext4/file.c
fs/jbd2/transaction.c

diff --combined fs/ext4/file.c
@@@ -108,14 -108,6 +108,6 @@@ ext4_file_dio_write(struct kiocb *iocb
  
        /* Unaligned direct AIO must be serialized; see comment above */
        if (unaligned_aio) {
-               static unsigned long unaligned_warn_time;
-               /* Warn about this once per day */
-               if (printk_timed_ratelimit(&unaligned_warn_time, 60*60*24*HZ))
-                       ext4_msg(inode->i_sb, KERN_WARNING,
-                                "Unaligned AIO/DIO on inode %ld by %s; "
-                                "performance will be poor.",
-                                inode->i_ino, current->comm);
                mutex_lock(ext4_aio_mutex(inode));
                ext4_unwritten_wait(inode);
        }
@@@ -303,7 -295,7 +295,7 @@@ static int ext4_file_open(struct inode 
   * page cache has data or not.
   */
  static int ext4_find_unwritten_pgoff(struct inode *inode,
 -                                   int origin,
 +                                   int whence,
                                     struct ext4_map_blocks *map,
                                     loff_t *offset)
  {
                nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
                                          (pgoff_t)num);
                if (nr_pages == 0) {
 -                      if (origin == SEEK_DATA)
 +                      if (whence == SEEK_DATA)
                                break;
  
 -                      BUG_ON(origin != SEEK_HOLE);
 +                      BUG_ON(whence != SEEK_HOLE);
                        /*
                         * If this is the first time to go into the loop and
                         * offset is not beyond the end offset, it will be a
                 * offset is smaller than the first page offset, it will be a
                 * hole at this offset.
                 */
 -              if (lastoff == startoff && origin == SEEK_HOLE &&
 +              if (lastoff == startoff && whence == SEEK_HOLE &&
                    lastoff < page_offset(pvec.pages[0])) {
                        found = 1;
                        break;
                         * If the current offset is not beyond the end of given
                         * range, it will be a hole.
                         */
 -                      if (lastoff < endoff && origin == SEEK_HOLE &&
 +                      if (lastoff < endoff && whence == SEEK_HOLE &&
                            page->index > end) {
                                found = 1;
                                *offset = lastoff;
                                do {
                                        if (buffer_uptodate(bh) ||
                                            buffer_unwritten(bh)) {
 -                                              if (origin == SEEK_DATA)
 +                                              if (whence == SEEK_DATA)
                                                        found = 1;
                                        } else {
 -                                              if (origin == SEEK_HOLE)
 +                                              if (whence == SEEK_HOLE)
                                                        found = 1;
                                        }
                                        if (found) {
                 * The no. of pages is less than our desired, that would be a
                 * hole in there.
                 */
 -              if (nr_pages < num && origin == SEEK_HOLE) {
 +              if (nr_pages < num && whence == SEEK_HOLE) {
                        found = 1;
                        *offset = lastoff;
                        break;
@@@ -609,7 -601,7 +601,7 @@@ static loff_t ext4_seek_hole(struct fil
   * by calling generic_file_llseek_size() with the appropriate maxbytes
   * value for each.
   */
 -loff_t ext4_llseek(struct file *file, loff_t offset, int origin)
 +loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
  {
        struct inode *inode = file->f_mapping->host;
        loff_t maxbytes;
        else
                maxbytes = inode->i_sb->s_maxbytes;
  
 -      switch (origin) {
 +      switch (whence) {
        case SEEK_SET:
        case SEEK_CUR:
        case SEEK_END:
 -              return generic_file_llseek_size(file, offset, origin,
 +              return generic_file_llseek_size(file, offset, whence,
                                                maxbytes, i_size_read(inode));
        case SEEK_DATA:
                return ext4_seek_data(file, offset, maxbytes);
diff --combined fs/jbd2/transaction.c
@@@ -209,7 -209,8 +209,8 @@@ repeat
                if (!new_transaction)
                        goto alloc_transaction;
                write_lock(&journal->j_state_lock);
-               if (!journal->j_running_transaction) {
+               if (!journal->j_running_transaction &&
+                   !journal->j_barrier_count) {
                        jbd2_get_transaction(journal, new_transaction);
                        new_transaction = NULL;
                }
@@@ -1250,7 -1251,7 +1251,7 @@@ int jbd2_journal_forget (handle_t *hand
                goto not_jbd;
        }
  
 -      /* keep track of wether or not this transaction modified us */
 +      /* keep track of whether or not this transaction modified us */
        was_modified = jh->b_modified;
  
        /*
@@@ -1839,7 -1840,6 +1840,6 @@@ static int journal_unmap_buffer(journal
  
        BUFFER_TRACE(bh, "entry");
  
- retry:
        /*
         * It is safe to proceed here without the j_list_lock because the
         * buffers cannot be stolen by try_to_free_buffers as long as we are
                 * for commit and try again.
                 */
                if (partial_page) {
-                       tid_t tid = journal->j_committing_transaction->t_tid;
                        jbd2_journal_put_journal_head(jh);
                        spin_unlock(&journal->j_list_lock);
                        jbd_unlock_bh_state(bh);
                        write_unlock(&journal->j_state_lock);
-                       jbd2_log_wait_commit(journal, tid);
-                       goto retry;
+                       return -EBUSY;
                }
                /*
                 * OK, buffer won't be reachable after truncate. We just set
@@@ -2002,21 -1999,23 +1999,23 @@@ zap_buffer_unlocked
   * @page:    page to flush
   * @offset:  length of page to invalidate.
   *
-  * Reap page buffers containing data after offset in page.
-  *
+  * Reap page buffers containing data after offset in page. Can return -EBUSY
+  * if buffers are part of the committing transaction and the page is straddling
+  * i_size. Caller then has to wait for current commit and try again.
   */
void jbd2_journal_invalidatepage(journal_t *journal,
-                     struct page *page,
-                     unsigned long offset)
int jbd2_journal_invalidatepage(journal_t *journal,
+                               struct page *page,
+                               unsigned long offset)
  {
        struct buffer_head *head, *bh, *next;
        unsigned int curr_off = 0;
        int may_free = 1;
+       int ret = 0;
  
        if (!PageLocked(page))
                BUG();
        if (!page_has_buffers(page))
-               return;
+               return 0;
  
        /* We will potentially be playing with lists other than just the
         * data lists (especially for journaled data mode), so be
                if (offset <= curr_off) {
                        /* This block is wholly outside the truncation point */
                        lock_buffer(bh);
-                       may_free &= journal_unmap_buffer(journal, bh,
-                                                        offset > 0);
+                       ret = journal_unmap_buffer(journal, bh, offset > 0);
                        unlock_buffer(bh);
+                       if (ret < 0)
+                               return ret;
+                       may_free &= ret;
                }
                curr_off = next_off;
                bh = next;
                if (may_free && try_to_free_buffers(page))
                        J_ASSERT(!page_has_buffers(page));
        }
+       return 0;
  }
  
  /*