diff --git a/fs/namespace.c b/fs/namespace.c
index 22e5367..d9bf3ef 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
 #include <linux/uaccess.h>
 #include <linux/proc_ns.h>
 #include <linux/magic.h>
+#include <linux/bootmem.h>
 #include "pnode.h"
 #include "internal.h"
 
-#define HASH_SHIFT ilog2(PAGE_SIZE / sizeof(struct list_head))
-#define HASH_SIZE (1UL << HASH_SHIFT)
+static unsigned int m_hash_mask __read_mostly;
+static unsigned int m_hash_shift __read_mostly;
+static unsigned int mp_hash_mask __read_mostly;
+static unsigned int mp_hash_shift __read_mostly;
+
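+/*
+ * "mhash_entries=" and "mphash_entries=" on the kernel command line
+ * override the table sizes picked by alloc_large_system_hash() in
+ * mnt_init(); with no option given, sizing is automatic.
+ */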
+static __initdata unsigned long mhash_entries;
+static int __init set_mhash_entries(char *str)
+{
+       if (!str)
+               return 0;
+       mhash_entries = simple_strtoul(str, &str, 0);
+       return 1;
+}
+__setup("mhash_entries=", set_mhash_entries);
+
+static __initdata unsigned long mphash_entries;
+static int __init set_mphash_entries(char *str)
+{
+       if (!str)
+               return 0;
+       mphash_entries = simple_strtoul(str, &str, 0);
+       return 1;
+}
+__setup("mphash_entries=", set_mphash_entries);
 
 static int event;
 static DEFINE_IDA(mnt_id_ida);
@@ -36,8 +59,8 @@ static DEFINE_SPINLOCK(mnt_id_lock);
 static int mnt_id_start = 0;
 static int mnt_group_start = 1;
 
-static struct list_head *mount_hashtable __read_mostly;
-static struct list_head *mountpoint_hashtable __read_mostly;
+static struct hlist_head *mount_hashtable __read_mostly;
+static struct hlist_head *mountpoint_hashtable __read_mostly;
 static struct kmem_cache *mnt_cache __read_mostly;
 static DECLARE_RWSEM(namespace_sem);
 
@@ -55,12 +78,19 @@ EXPORT_SYMBOL_GPL(fs_kobj);
  */
 __cacheline_aligned_in_smp DEFINE_SEQLOCK(mount_lock);
 
-static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry)
+static inline struct hlist_head *m_hash(struct vfsmount *mnt, struct dentry *dentry)
 {
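+       /*
+        * Mix the mount and dentry addresses, dropping the low bits that
+        * objects in the same cache line share, and fold the high bits
+        * back in before masking down to a chain head.
+        */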
        unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
        tmp += ((unsigned long)dentry / L1_CACHE_BYTES);
-       tmp = tmp + (tmp >> HASH_SHIFT);
-       return tmp & (HASH_SIZE - 1);
+       tmp = tmp + (tmp >> m_hash_shift);
+       return &mount_hashtable[tmp & m_hash_mask];
+}
+
+static inline struct hlist_head *mp_hash(struct dentry *dentry)
+{
+       unsigned long tmp = ((unsigned long)dentry / L1_CACHE_BYTES);
+       tmp = tmp + (tmp >> mp_hash_shift);
+       return &mountpoint_hashtable[tmp & mp_hash_mask];
 }
 
 /*
@@ -187,7 +217,7 @@ static struct mount *alloc_vfsmnt(const char *name)
                mnt->mnt_writers = 0;
 #endif
 
-               INIT_LIST_HEAD(&mnt->mnt_hash);
+               INIT_HLIST_NODE(&mnt->mnt_hash);
                INIT_LIST_HEAD(&mnt->mnt_child);
                INIT_LIST_HEAD(&mnt->mnt_mounts);
                INIT_LIST_HEAD(&mnt->mnt_list);
@@ -575,10 +605,10 @@ bool legitimize_mnt(struct vfsmount *bastard, unsigned seq)
  */
 struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
 {
-       struct list_head *head = mount_hashtable + hash(mnt, dentry);
+       struct hlist_head *head = m_hash(mnt, dentry);
        struct mount *p;
 
-       list_for_each_entry_rcu(p, head, mnt_hash)
+       hlist_for_each_entry_rcu(p, head, mnt_hash)
                if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry)
                        return p;
        return NULL;
@@ -590,13 +620,17 @@ struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
  */
 struct mount *__lookup_mnt_last(struct vfsmount *mnt, struct dentry *dentry)
 {
-       struct list_head *head = mount_hashtable + hash(mnt, dentry);
-       struct mount *p;
-
-       list_for_each_entry_reverse(p, head, mnt_hash)
-               if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry)
-                       return p;
-       return NULL;
+       struct mount *p, *res;
+       res = p = __lookup_mnt(mnt, dentry);
+       if (!p)
+               goto out;
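+       /*
+        * Mounts shadowing one another are kept adjacent on the hash
+        * chain (see attach_shadowed()), so keep walking while the
+        * parent and mountpoint still match and return the last entry
+        * that did.
+        */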
+       hlist_for_each_entry_continue(p, mnt_hash) {
+               if (&p->mnt_parent->mnt != mnt || p->mnt_mountpoint != dentry)
+                       break;
+               res = p;
+       }
+out:
+       return res;
 }
 
 /*
@@ -633,11 +667,11 @@ struct vfsmount *lookup_mnt(struct path *path)
 
 static struct mountpoint *new_mountpoint(struct dentry *dentry)
 {
-       struct list_head *chain = mountpoint_hashtable + hash(NULL, dentry);
+       struct hlist_head *chain = mp_hash(dentry);
        struct mountpoint *mp;
        int ret;
 
-       list_for_each_entry(mp, chain, m_hash) {
+       hlist_for_each_entry(mp, chain, m_hash) {
                if (mp->m_dentry == dentry) {
                        /* might be worth a WARN_ON() */
                        if (d_unlinked(dentry))
@@ -659,7 +693,7 @@ static struct mountpoint *new_mountpoint(struct dentry *dentry)
 
        mp->m_dentry = dentry;
        mp->m_count = 1;
-       list_add(&mp->m_hash, chain);
+       hlist_add_head(&mp->m_hash, chain);
        return mp;
 }
 
@@ -670,7 +704,7 @@ static void put_mountpoint(struct mountpoint *mp)
                spin_lock(&dentry->d_lock);
                dentry->d_flags &= ~DCACHE_MOUNTED;
                spin_unlock(&dentry->d_lock);
-               list_del(&mp->m_hash);
+               hlist_del(&mp->m_hash);
                kfree(mp);
        }
 }
@@ -712,7 +746,7 @@ static void detach_mnt(struct mount *mnt, struct path *old_path)
        mnt->mnt_parent = mnt;
        mnt->mnt_mountpoint = mnt->mnt.mnt_root;
        list_del_init(&mnt->mnt_child);
-       list_del_init(&mnt->mnt_hash);
+       hlist_del_init_rcu(&mnt->mnt_hash);
        put_mountpoint(mnt->mnt_mp);
        mnt->mnt_mp = NULL;
 }
@@ -739,15 +773,28 @@ static void attach_mnt(struct mount *mnt,
                        struct mountpoint *mp)
 {
        mnt_set_mountpoint(parent, mp, mnt);
-       list_add_tail(&mnt->mnt_hash, mount_hashtable +
-                       hash(&parent->mnt, mp->m_dentry));
+       hlist_add_head_rcu(&mnt->mnt_hash, m_hash(&parent->mnt, mp->m_dentry));
        list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
 }
 
+static void attach_shadowed(struct mount *mnt,
+                       struct mount *parent,
+                       struct mount *shadows)
+{
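+       /*
+        * "shadows", when set, is the mount already hashed at the same
+        * parent and mountpoint.  Insert the new mount right behind it so
+        * mounts shadowing one another stay adjacent on the chain:
+        * __lookup_mnt() returns the first of such a run and
+        * __lookup_mnt_last() the last.
+        */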
+       if (shadows) {
+               hlist_add_after_rcu(&shadows->mnt_hash, &mnt->mnt_hash);
+               list_add(&mnt->mnt_child, &shadows->mnt_child);
+       } else {
+               hlist_add_head_rcu(&mnt->mnt_hash,
+                               m_hash(&parent->mnt, mnt->mnt_mountpoint));
+               list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
+       }
+}
+
 /*
  * vfsmount lock must be held for write
  */
-static void commit_tree(struct mount *mnt)
+static void commit_tree(struct mount *mnt, struct mount *shadows)
 {
        struct mount *parent = mnt->mnt_parent;
        struct mount *m;
@@ -762,9 +809,7 @@ static void commit_tree(struct mount *mnt)
 
        list_splice(&head, n->list.prev);
 
-       list_add_tail(&mnt->mnt_hash, mount_hashtable +
-                               hash(&parent->mnt, mnt->mnt_mountpoint));
-       list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
+       attach_shadowed(mnt, parent, shadows);
        touch_mnt_namespace(n);
 }
 
@@ -849,10 +894,23 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root,
                        goto out_free;
        }
 
-       mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~MNT_WRITE_HOLD;
+       mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~(MNT_WRITE_HOLD|MNT_MARKED);
        /* Don't allow unprivileged users to change mount flags */
-       if ((flag & CL_UNPRIVILEGED) && (mnt->mnt.mnt_flags & MNT_READONLY))
-               mnt->mnt.mnt_flags |= MNT_LOCK_READONLY;
+       if (flag & CL_UNPRIVILEGED) {
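+               /*
+                * Lock the atime mode outright, and lock each restrictive
+                * flag that is currently set, so that an unprivileged
+                * remount cannot relax them (enforced in do_remount()).
+                */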
+               mnt->mnt.mnt_flags |= MNT_LOCK_ATIME;
+
+               if (mnt->mnt.mnt_flags & MNT_READONLY)
+                       mnt->mnt.mnt_flags |= MNT_LOCK_READONLY;
+
+               if (mnt->mnt.mnt_flags & MNT_NODEV)
+                       mnt->mnt.mnt_flags |= MNT_LOCK_NODEV;
+
+               if (mnt->mnt.mnt_flags & MNT_NOSUID)
+                       mnt->mnt.mnt_flags |= MNT_LOCK_NOSUID;
+
+               if (mnt->mnt.mnt_flags & MNT_NOEXEC)
+                       mnt->mnt.mnt_flags |= MNT_LOCK_NOEXEC;
+       }
 
        /* Don't allow unprivileged users to reveal what is under a mount */
        if ((flag & CL_UNPRIVILEGED) && list_empty(&old->mnt_expire))
@@ -1153,26 +1211,33 @@ int may_umount(struct vfsmount *mnt)
 
 EXPORT_SYMBOL(may_umount);
 
-static LIST_HEAD(unmounted);   /* protected by namespace_sem */
+static HLIST_HEAD(unmounted);  /* protected by namespace_sem */
 
 static void namespace_unlock(void)
 {
        struct mount *mnt;
-       LIST_HEAD(head);
+       struct hlist_head head = unmounted;
 
-       if (likely(list_empty(&unmounted))) {
+       if (likely(hlist_empty(&head))) {
                up_write(&namespace_sem);
                return;
        }
 
-       list_splice_init(&unmounted, &head);
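+       /*
+        * "head" took over the list by plain struct assignment above, so
+        * repoint the first node's pprev at the new head before emptying
+        * "unmounted".
+        */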
+       head.first->pprev = &head.first;
+       INIT_HLIST_HEAD(&unmounted);
+
+       /* undo decrements we'd done in umount_tree() */
+       hlist_for_each_entry(mnt, &head, mnt_hash)
+               if (mnt->mnt_ex_mountpoint.mnt)
+                       mntget(mnt->mnt_ex_mountpoint.mnt);
+
        up_write(&namespace_sem);
 
        synchronize_rcu();
 
-       while (!list_empty(&head)) {
-               mnt = list_first_entry(&head, struct mount, mnt_hash);
-               list_del_init(&mnt->mnt_hash);
+       while (!hlist_empty(&head)) {
+               mnt = hlist_entry(head.first, struct mount, mnt_hash);
+               hlist_del_init(&mnt->mnt_hash);
                if (mnt->mnt_ex_mountpoint.mnt)
                        path_put(&mnt->mnt_ex_mountpoint);
                mntput(&mnt->mnt);
@@ -1193,25 +1258,31 @@ static inline void namespace_lock(void)
  */
 void umount_tree(struct mount *mnt, int how)
 {
-       LIST_HEAD(tmp_list);
+       HLIST_HEAD(tmp_list);
        struct mount *p;
+       struct mount *last = NULL;
 
-       for (p = mnt; p; p = next_mnt(p, mnt))
-               list_move(&p->mnt_hash, &tmp_list);
+       for (p = mnt; p; p = next_mnt(p, mnt)) {
+               hlist_del_init_rcu(&p->mnt_hash);
+               hlist_add_head(&p->mnt_hash, &tmp_list);
+       }
+
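+       /* detach every victim from its parent's list of children */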
+       hlist_for_each_entry(p, &tmp_list, mnt_hash)
+               list_del_init(&p->mnt_child);
 
        if (how)
                propagate_umount(&tmp_list);
 
-       list_for_each_entry(p, &tmp_list, mnt_hash) {
+       hlist_for_each_entry(p, &tmp_list, mnt_hash) {
                list_del_init(&p->mnt_expire);
                list_del_init(&p->mnt_list);
                __touch_mnt_namespace(p->mnt_ns);
                p->mnt_ns = NULL;
                if (how < 2)
                        p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;
-               list_del_init(&p->mnt_child);
                if (mnt_has_parent(p)) {
                        put_mountpoint(p->mnt_mp);
+                       mnt_add_count(p->mnt_parent, -1);
                        /* move the reference to mountpoint into ->mnt_ex_mountpoint */
                        p->mnt_ex_mountpoint.dentry = p->mnt_mountpoint;
                        p->mnt_ex_mountpoint.mnt = &p->mnt_parent->mnt;
@@ -1220,8 +1291,13 @@ void umount_tree(struct mount *mnt, int how)
                        p->mnt_mp = NULL;
                }
                change_mnt_propagation(p, MS_PRIVATE);
+               last = p;
+       }
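+       /*
+        * hlist has no splice helper: graft tmp_list onto the front of
+        * "unmounted" by hand via its last entry.
+        */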
+       if (last) {
+               last->mnt_hash.next = unmounted.first;
+               unmounted.first = tmp_list.first;
+               unmounted.first->pprev = &unmounted.first;
        }
-       list_splice(&tmp_list, &unmounted);
 }
 
 static void shrink_submounts(struct mount *mnt);
@@ -1289,6 +1365,8 @@ static int do_umount(struct mount *mnt, int flags)
                 * Special case for "unmounting" root ...
                 * we just try to remount it readonly.
                 */
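+               /*
+                * Remounting the root read-only affects every user of the
+                * superblock, so require the global CAP_SYS_ADMIN.
+                */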
+               if (!capable(CAP_SYS_ADMIN))
+                       return -EPERM;
                down_write(&sb->s_umount);
                if (!(sb->s_flags & MS_RDONLY))
                        retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
@@ -1437,6 +1515,7 @@ struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
                        continue;
 
                for (s = r; s; s = next_mnt(s, r)) {
+                       struct mount *t = NULL;
                        if (!(flag & CL_COPY_UNBINDABLE) &&
                            IS_MNT_UNBINDABLE(s)) {
                                s = skip_mnt_tree(s);
@@ -1458,7 +1537,14 @@ struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
                                goto out;
                        lock_mount_hash();
                        list_add_tail(&q->mnt_list, &res->mnt_list);
-                       attach_mnt(q, parent, p->mnt_mp);
+                       mnt_set_mountpoint(parent, p->mnt_mp, q);
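+                       /*
+                        * If the most recently attached child of the parent
+                        * sits on the same mountpoint, the copy shadows it
+                        * and must be hashed directly after it.
+                        */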
+                       if (!list_empty(&parent->mnt_mounts)) {
+                               t = list_last_entry(&parent->mnt_mounts,
+                                       struct mount, mnt_child);
+                               if (t->mnt_mp != p->mnt_mp)
+                                       t = NULL;
+                       }
+                       attach_shadowed(q, parent, t);
                        unlock_mount_hash();
                }
        }
@@ -1605,24 +1691,23 @@ static int attach_recursive_mnt(struct mount *source_mnt,
                        struct mountpoint *dest_mp,
                        struct path *parent_path)
 {
-       LIST_HEAD(tree_list);
+       HLIST_HEAD(tree_list);
        struct mount *child, *p;
+       struct hlist_node *n;
        int err;
 
        if (IS_MNT_SHARED(dest_mnt)) {
                err = invent_group_ids(source_mnt, true);
                if (err)
                        goto out;
-       }
-       err = propagate_mnt(dest_mnt, dest_mp, source_mnt, &tree_list);
-       if (err)
-               goto out_cleanup_ids;
-
-       lock_mount_hash();
-
-       if (IS_MNT_SHARED(dest_mnt)) {
+               err = propagate_mnt(dest_mnt, dest_mp, source_mnt, &tree_list);
+               lock_mount_hash();
+               if (err)
+                       goto out_cleanup_ids;
                for (p = source_mnt; p; p = next_mnt(p, source_mnt))
                        set_mnt_shared(p);
+       } else {
+               lock_mount_hash();
        }
        if (parent_path) {
                detach_mnt(source_mnt, parent_path);
@@ -1630,20 +1715,27 @@ static int attach_recursive_mnt(struct mount *source_mnt,
                touch_mnt_namespace(source_mnt->mnt_ns);
        } else {
                mnt_set_mountpoint(dest_mnt, dest_mp, source_mnt);
-               commit_tree(source_mnt);
+               commit_tree(source_mnt, NULL);
        }
 
-       list_for_each_entry_safe(child, p, &tree_list, mnt_hash) {
-               list_del_init(&child->mnt_hash);
-               commit_tree(child);
+       hlist_for_each_entry_safe(child, n, &tree_list, mnt_hash) {
+               struct mount *q;
+               hlist_del_init(&child->mnt_hash);
+               q = __lookup_mnt_last(&child->mnt_parent->mnt,
+                                     child->mnt_mountpoint);
+               commit_tree(child, q);
        }
        unlock_mount_hash();
 
        return 0;
 
  out_cleanup_ids:
-       if (IS_MNT_SHARED(dest_mnt))
-               cleanup_group_ids(source_mnt, NULL);
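+       /*
+        * propagate_mnt() may have created copies before failing; take
+        * each of them back down while mount_lock is still held.
+        */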
+       while (!hlist_empty(&tree_list)) {
+               child = hlist_entry(tree_list.first, struct mount, mnt_hash);
+               umount_tree(child, 0);
+       }
+       unlock_mount_hash();
+       cleanup_group_ids(source_mnt, NULL);
  out:
        return err;
 }
@@ -1835,9 +1927,6 @@ static int change_mount_flags(struct vfsmount *mnt, int ms_flags)
        if (readonly_request == __mnt_is_readonly(mnt))
                return 0;
 
-       if (mnt->mnt_flags & MNT_LOCK_READONLY)
-               return -EPERM;
-
        if (readonly_request)
                error = mnt_make_readonly(real_mount(mnt));
        else
@@ -1863,6 +1952,33 @@ static int do_remount(struct path *path, int flags, int mnt_flags,
        if (path->dentry != path->mnt->mnt_root)
                return -EINVAL;
 
+       /* Don't allow changing of locked mnt flags.
+        *
+        * No locks need to be held here while testing the various
+        * MNT_LOCK flags because those flags can never be cleared
+        * once they are set.
+        */
+       if ((mnt->mnt.mnt_flags & MNT_LOCK_READONLY) &&
+           !(mnt_flags & MNT_READONLY)) {
+               return -EPERM;
+       }
+       if ((mnt->mnt.mnt_flags & MNT_LOCK_NODEV) &&
+           !(mnt_flags & MNT_NODEV)) {
+               return -EPERM;
+       }
+       if ((mnt->mnt.mnt_flags & MNT_LOCK_NOSUID) &&
+           !(mnt_flags & MNT_NOSUID)) {
+               return -EPERM;
+       }
+       if ((mnt->mnt.mnt_flags & MNT_LOCK_NOEXEC) &&
+           !(mnt_flags & MNT_NOEXEC)) {
+               return -EPERM;
+       }
+       if ((mnt->mnt.mnt_flags & MNT_LOCK_ATIME) &&
+           ((mnt->mnt.mnt_flags & MNT_ATIME_MASK) != (mnt_flags & MNT_ATIME_MASK))) {
+               return -EPERM;
+       }
+
        err = security_sb_remount(sb, data);
        if (err)
                return err;
@@ -1876,7 +1992,7 @@ static int do_remount(struct path *path, int flags, int mnt_flags,
                err = do_remount_sb(sb, flags, data, 0);
        if (!err) {
                lock_mount_hash();
-               mnt_flags |= mnt->mnt.mnt_flags & MNT_PROPAGATION_MASK;
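+               /* carry over every flag userspace may not set or clear,
+                * including propagation state and the MNT_LOCK_* bits */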
+               mnt_flags |= mnt->mnt.mnt_flags & ~MNT_USER_SETTABLE_MASK;
                mnt->mnt.mnt_flags = mnt_flags;
                touch_mnt_namespace(mnt->mnt_ns);
                unlock_mount_hash();
@@ -1997,7 +2113,7 @@ static int do_add_mount(struct mount *newmnt, struct path *path, int mnt_flags)
        struct mount *parent;
        int err;
 
-       mnt_flags &= ~(MNT_SHARED | MNT_WRITE_HOLD | MNT_INTERNAL | MNT_DOOMED | MNT_SYNC_UMOUNT);
+       mnt_flags &= ~MNT_INTERNAL_FLAGS;
 
        mp = lock_mount(path);
        if (IS_ERR(mp))
@@ -2061,7 +2177,7 @@ static int do_new_mount(struct path *path, const char *fstype, int flags,
                 */
                if (!(type->fs_flags & FS_USERNS_DEV_MOUNT)) {
                        flags |= MS_NODEV;
-                       mnt_flags |= MNT_NODEV;
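+                       /* and lock MNT_NODEV so a later remount cannot clear it */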
+                       mnt_flags |= MNT_NODEV | MNT_LOCK_NODEV;
                }
        }
 
@@ -2375,6 +2491,14 @@ long do_mount(const char *dev_name, const char *dir_name,
        if (flags & MS_RDONLY)
                mnt_flags |= MNT_READONLY;
 
+       /* The default atime for remount is preservation */
+       if ((flags & MS_REMOUNT) &&
+           ((flags & (MS_NOATIME | MS_NODIRATIME | MS_RELATIME |
+                      MS_STRICTATIME)) == 0)) {
+               mnt_flags &= ~MNT_ATIME_MASK;
+               mnt_flags |= path.mnt->mnt_flags & MNT_ATIME_MASK;
+       }
+
        flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE | MS_BORN |
                   MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
                   MS_STRICTATIME);
@@ -2707,6 +2831,9 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
        /* make sure we can reach put_old from new_root */
        if (!is_path_reachable(old_mnt, old.dentry, &new))
                goto out4;
+       /* make certain new is below the root; otherwise it would be
+        * possible to create a loop in the mount tree */
+       if (!is_path_reachable(new_mnt, new.dentry, &root))
+               goto out4;
        root_mp->m_count++; /* pin it so it won't go away */
        lock_mount_hash();
        detach_mnt(new_mnt, &parent_path);
@@ -2777,18 +2904,24 @@ void __init mnt_init(void)
        mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct mount),
                        0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
 
-       mount_hashtable = (struct list_head *)__get_free_page(GFP_ATOMIC);
-       mountpoint_hashtable = (struct list_head *)__get_free_page(GFP_ATOMIC);
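+       /*
+        * Sized by alloc_large_system_hash(): by default one bucket per
+        * 2^19 bytes of low memory, overridable with mhash_entries= and
+        * mphash_entries=.
+        */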
+       mount_hashtable = alloc_large_system_hash("Mount-cache",
+                               sizeof(struct hlist_head),
+                               mhash_entries, 19,
+                               0,
+                               &m_hash_shift, &m_hash_mask, 0, 0);
+       mountpoint_hashtable = alloc_large_system_hash("Mountpoint-cache",
+                               sizeof(struct hlist_head),
+                               mphash_entries, 19,
+                               0,
+                               &mp_hash_shift, &mp_hash_mask, 0, 0);
 
        if (!mount_hashtable || !mountpoint_hashtable)
                panic("Failed to allocate mount hash table\n");
 
-       printk(KERN_INFO "Mount-cache hash table entries: %lu\n", HASH_SIZE);
-
-       for (u = 0; u < HASH_SIZE; u++)
-               INIT_LIST_HEAD(&mount_hashtable[u]);
-       for (u = 0; u < HASH_SIZE; u++)
-               INIT_LIST_HEAD(&mountpoint_hashtable[u]);
+       for (u = 0; u <= m_hash_mask; u++)
+               INIT_HLIST_HEAD(&mount_hashtable[u]);
+       for (u = 0; u <= mp_hash_mask; u++)
+               INIT_HLIST_HEAD(&mountpoint_hashtable[u]);
 
        kernfs_init();