Merge tag 'gfs2-4.11.addendum' of git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2
author     Linus Torvalds <torvalds@linux-foundation.org>
           Thu, 23 Feb 2017 17:36:04 +0000 (09:36 -0800)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Thu, 23 Feb 2017 17:36:04 +0000 (09:36 -0800)
Pull GFS2 fix from Bob Peterson:
 "This is an addendum for the 4.11 merge window.

  Andy Price wrote this patch to close a nasty race condition that
  allows access to glocks that are being destroyed. Without this patch,
  GFS2 is vulnerable to random corruption and kernel panic"

* tag 'gfs2-4.11.addendum' of git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2:
  gfs2: Add missing rcu locking for glock lookup
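The fix follows a standard kernel pattern: rhashtable_lookup_fast() may
only be called inside an RCU read-side critical section, and the object
it returns only becomes safe to use once lockref_get_not_dead() has
taken a reference, still inside that section. Below is a minimal sketch
of the lookup pattern and of the -EEXIST retry used on the insert path,
as in the two gfs2_glock_get() hunks in the diff. Everything named here
(struct foo, foo_table, foo_params, foo_find(), foo_insert()) is
illustrative, not GFS2 code; only the kernel APIs are real.

  #include <linux/rhashtable.h>
  #include <linux/lockref.h>
  #include <linux/rcupdate.h>
  #include <linux/sched.h>

  /* Hypothetical object kept in an rhashtable; not GFS2 code. */
  struct foo {
          u64 key;
          struct lockref lockref;  /* spinlock + refcount, like gl_lockref */
          struct rhash_head node;
  };

  static struct rhashtable foo_table;  /* rhashtable_init() done elsewhere */

  static const struct rhashtable_params foo_params = {
          .key_len     = sizeof(u64),
          .key_offset  = offsetof(struct foo, key),
          .head_offset = offsetof(struct foo, node),
  };

  /* Look up a foo and take a reference, or return NULL. */
  static struct foo *foo_find(u64 key)
  {
          struct foo *f;

          /*
           * The pointer returned by rhashtable_lookup_fast() is only
           * guaranteed to exist until rcu_read_unlock().  Taking the
           * reference inside the read-side section is what closes the
           * use-after-free window described in the pull message.
           */
          rcu_read_lock();
          f = rhashtable_lookup_fast(&foo_table, &key, foo_params);
          if (f && !lockref_get_not_dead(&f->lockref))
                  f = NULL;  /* found, but already being destroyed */
          rcu_read_unlock();

          return f;
  }

  /*
   * Insert path, mirroring the -EEXIST retry in gfs2_glock_get(): if
   * someone beat us to the insert but their object is already dying,
   * back off and retry a bounded number of times.
   */
  static int foo_insert(struct foo *new, struct foo **out)
  {
          struct foo *tmp;
          int tries = 0, ret;

  again:
          ret = rhashtable_lookup_insert_fast(&foo_table, &new->node,
                                              foo_params);
          if (ret == -EEXIST) {
                  ret = 0;
                  rcu_read_lock();
                  tmp = rhashtable_lookup_fast(&foo_table, &new->key,
                                               foo_params);
                  if (!tmp || !lockref_get_not_dead(&tmp->lockref)) {
                          if (++tries < 100) {
                                  rcu_read_unlock();
                                  cond_resched();
                                  goto again;
                          }
                          tmp = NULL;
                          ret = -ENOMEM;
                  }
                  rcu_read_unlock();
                  *out = tmp;
                  return ret;
          }
          *out = ret ? NULL : new;
          return ret;
  }

The pattern is only sound because the objects are reclaimed behind an
RCU grace period after removal from the table (GFS2 frees glock memory
from an RCU callback); otherwise the pointer returned by the lookup
could be freed before lockref_get_not_dead() ever examines it, which is
exactly the race this patch closes.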

fs/gfs2/glock.c

diff --combined fs/gfs2/glock.c
@@@ -21,7 -21,7 +21,7 @@@
  #include <linux/list.h>
  #include <linux/wait.h>
  #include <linux/module.h>
 -#include <asm/uaccess.h>
 +#include <linux/uaccess.h>
  #include <linux/seq_file.h>
  #include <linux/debugfs.h>
  #include <linux/kthread.h>
@@@ -658,9 -658,11 +658,11 @@@ int gfs2_glock_get(struct gfs2_sbd *sdp
        struct kmem_cache *cachep;
        int ret, tries = 0;
  
+       rcu_read_lock();
        gl = rhashtable_lookup_fast(&gl_hash_table, &name, ht_parms);
        if (gl && !lockref_get_not_dead(&gl->gl_lockref))
                gl = NULL;
+       rcu_read_unlock();
  
        *glp = gl;
        if (gl)
        gl->gl_target = LM_ST_UNLOCKED;
        gl->gl_demote_state = LM_ST_EXCLUSIVE;
        gl->gl_ops = glops;
 -      gl->gl_dstamp = ktime_set(0, 0);
 +      gl->gl_dstamp = 0;
        preempt_disable();
        /* We use the global stats to estimate the initial per-glock stats */
        gl->gl_stats = this_cpu_ptr(sdp->sd_lkstats)->lkstats[glops->go_type];
@@@ -728,15 -730,18 +730,18 @@@ again
  
        if (ret == -EEXIST) {
                ret = 0;
+               rcu_read_lock();
                tmp = rhashtable_lookup_fast(&gl_hash_table, &name, ht_parms);
                if (tmp == NULL || !lockref_get_not_dead(&tmp->gl_lockref)) {
                        if (++tries < 100) {
+                               rcu_read_unlock();
                                cond_resched();
                                goto again;
                        }
                        tmp = NULL;
                        ret = -ENOMEM;
                }
+               rcu_read_unlock();
        } else {
                WARN_ON_ONCE(ret);
        }
@@@ -1420,32 -1425,26 +1425,32 @@@ static struct shrinker glock_shrinker 
   * @sdp: the filesystem
   * @bucket: the bucket
   *
 + * Note that the function can be called multiple times on the same
 + * object.  So the user must ensure that the function can cope with
 + * that.
   */
  
  static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
  {
        struct gfs2_glock *gl;
 -      struct rhash_head *pos;
 -      const struct bucket_table *tbl;
 -      int i;
 +      struct rhashtable_iter iter;
  
 -      rcu_read_lock();
 -      tbl = rht_dereference_rcu(gl_hash_table.tbl, &gl_hash_table);
 -      for (i = 0; i < tbl->size; i++) {
 -              rht_for_each_entry_rcu(gl, pos, tbl, i, gl_node) {
 +      rhashtable_walk_enter(&gl_hash_table, &iter);
 +
 +      do {
 +              gl = ERR_PTR(rhashtable_walk_start(&iter));
 +              if (gl)
 +                      continue;
 +
 +              while ((gl = rhashtable_walk_next(&iter)) && !IS_ERR(gl))
                        if ((gl->gl_name.ln_sbd == sdp) &&
                            lockref_get_not_dead(&gl->gl_lockref))
                                examiner(gl);
 -              }
 -      }
 -      rcu_read_unlock();
 -      cond_resched();
 +
 +              rhashtable_walk_stop(&iter);
 +      } while (cond_resched(), gl == ERR_PTR(-EAGAIN));
 +
 +      rhashtable_walk_exit(&iter);
  }
  
  /**
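The glock_hash_walk() hunk above also replaces an open-coded walk over
the bucket table with the rhashtable iterator API
(rhashtable_walk_enter/start/next/stop/exit). A sketch of the same loop
shape, reusing the hypothetical struct foo and foo_table from the
earlier example; foo_walk() and fn() are likewise illustrative, and
fn() is assumed to drop the reference it is handed, as the glock
examiners do.

  /*
   * Walk every foo in the table, calling fn() on each object that
   * is still alive.  Mirrors the loop shape of glock_hash_walk().
   */
  static void foo_walk(void (*fn)(struct foo *f))
  {
          struct rhashtable_iter iter;
          struct foo *f;

          rhashtable_walk_enter(&foo_table, &iter);

          do {
                  /*
                   * rhashtable_walk_start() returns 0 or -EAGAIN;
                   * funnelling the error through ERR_PTR() lets the
                   * single test at the bottom of the loop cover both
                   * this call and rhashtable_walk_next().
                   */
                  f = ERR_PTR(rhashtable_walk_start(&iter));
                  if (f)
                          continue;

                  /* NULL ends the walk; ERR_PTR(-EAGAIN) means a resize. */
                  while ((f = rhashtable_walk_next(&iter)) && !IS_ERR(f))
                          if (lockref_get_not_dead(&f->lockref))
                                  fn(f);

                  rhashtable_walk_stop(&iter);
          } while (cond_resched(), f == ERR_PTR(-EAGAIN));

          rhashtable_walk_exit(&iter);
  }

When the table is resized mid-walk, rhashtable_walk_next() returns
ERR_PTR(-EAGAIN) and the whole walk restarts, so objects that were
already visited can be handed to the examiner again. That is why the
hunk also adds the comment that the function can be called multiple
times on the same object and the caller must cope with that.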