Merge tag 'iommu-updates-v6.2' of git://git.kernel.org/pub/scm/linux/kernel/git/joro...
diff --git a/kernel/fork.c b/kernel/fork.c
index 89b8b6c..9f7fe35 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -756,8 +756,13 @@ static void check_mm(struct mm_struct *mm)
                         "Please make sure 'struct resident_page_types[]' is updated as well");
 
        for (i = 0; i < NR_MM_COUNTERS; i++) {
-               long x = atomic_long_read(&mm->rss_stat.count[i]);
+               long x = percpu_counter_sum(&mm->rss_stat[i]);
 
+               if (likely(!x))
+                       continue;
+
+       /* Make sure this is not due to a race with CPU offlining. */
+               x = percpu_counter_sum_all(&mm->rss_stat[i]);
                if (unlikely(x))
                        pr_alert("BUG: Bad rss-counter state mm:%p type:%s val:%ld\n",
                                 mm, resident_page_types[i], x);
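percpu_counter_sum() folds the per-CPU deltas of online CPUs into the shared count, so a delta stranded on a CPU that has just gone offline is invisible to it and the cheap first pass can report a spurious non-zero value. percpu_counter_sum_all() (a 6.2-era interface) also walks the per-CPU slots of offline CPUs before check_mm() declares a leaked rss counter. A minimal sketch of the two-pass pattern, assuming that 6.2-era API; the helper name is hypothetical:

    #include <linux/percpu_counter.h>

    /* Hypothetical helper mirroring the two-pass check in the hunk above. */
    static bool rss_counter_leaked(struct percpu_counter *fbc)
    {
            /* Cheap pass: folds deltas from online CPUs only. */
            if (likely(!percpu_counter_sum(fbc)))
                    return false;

            /*
             * Second pass: also visits per-CPU slots of offlined CPUs, so a
             * delta stranded by a racing CPU-offline is not reported as a leak.
             */
            return percpu_counter_sum_all(fbc) != 0;
    }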
@@ -782,6 +787,8 @@ static void check_mm(struct mm_struct *mm)
  */
 void __mmdrop(struct mm_struct *mm)
 {
+       int i;
+
        BUG_ON(mm == &init_mm);
        WARN_ON_ONCE(mm == current->mm);
        WARN_ON_ONCE(mm == current->active_mm);
@@ -791,6 +798,9 @@ void __mmdrop(struct mm_struct *mm)
        check_mm(mm);
        put_user_ns(mm->user_ns);
        mm_pasid_drop(mm);
+
+       for (i = 0; i < NR_MM_COUNTERS; i++)
+               percpu_counter_destroy(&mm->rss_stat[i]);
        free_mm(mm);
 }
 EXPORT_SYMBOL_GPL(__mmdrop);
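Each percpu_counter owns a per-CPU allocation made at init time, so __mmdrop() must destroy all NR_MM_COUNTERS counters before free_mm() releases the mm_struct that embeds them. The payoff for this teardown cost is on the update side: with rss_stat converted to percpu_counter, the rss helpers in include/linux/mm.h reduce to per-CPU adds. A hedged sketch of that update path (the body is an assumption; this diff only shows the fork.c side of the conversion):

    /* Sketch: rss updates become a cheap, mostly contention-free per-CPU add. */
    static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
    {
            percpu_counter_add(&mm->rss_stat[member], value);
    }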
@@ -1110,6 +1120,8 @@ static void mm_init_uprobes_state(struct mm_struct *mm)
 static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
        struct user_namespace *user_ns)
 {
+       int i;
+
        mt_init_flags(&mm->mm_mt, MM_MT_FLAGS);
        mt_set_external_lock(&mm->mm_mt, &mm->mmap_lock);
        atomic_set(&mm->mm_users, 1);
@@ -1151,10 +1163,17 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
        if (init_new_context(p, mm))
                goto fail_nocontext;
 
+       for (i = 0; i < NR_MM_COUNTERS; i++)
+               if (percpu_counter_init(&mm->rss_stat[i], 0, GFP_KERNEL_ACCOUNT))
+                       goto fail_pcpu;
+
        mm->user_ns = get_user_ns(user_ns);
        lru_gen_init_mm(mm);
        return mm;
 
+fail_pcpu:
+       while (i > 0)
+               percpu_counter_destroy(&mm->rss_stat[--i]);
 fail_nocontext:
        mm_free_pgd(mm);
 fail_nopgd:
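The fail_pcpu unwind destroys only the counters that were actually initialized: when percpu_counter_init() fails at index i, counters 0..i-1 succeeded, and the while loop walks back over exactly those. The same idiom in isolation, as a self-contained hypothetical helper:

    #include <linux/percpu_counter.h>

    /* Hypothetical helper showing the partial-init unwind idiom. */
    static int init_counter_array(struct percpu_counter *c, int n)
    {
            int i, err;

            for (i = 0; i < n; i++) {
                    err = percpu_counter_init(&c[i], 0, GFP_KERNEL_ACCOUNT);
                    if (err)
                            goto fail;
            }
            return 0;

    fail:
            /* Index i failed to init; tear down only the 0..i-1 that succeeded. */
            while (i > 0)
                    percpu_counter_destroy(&c[--i]);
            return err;
    }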
@@ -2588,11 +2607,6 @@ struct task_struct * __init fork_idle(int cpu)
        return task;
 }
 
-struct mm_struct *copy_init_mm(void)
-{
-       return dup_mm(NULL, &init_mm);
-}
-
 /*
  * This is like kernel_clone(), but shaved down and tailored to just
  * creating io_uring workers. It returns a created task, or an error pointer.
@@ -3011,10 +3025,27 @@ static void sighand_ctor(void *data)
        init_waitqueue_head(&sighand->signalfd_wqh);
 }
 
-void __init proc_caches_init(void)
+void __init mm_cache_init(void)
 {
        unsigned int mm_size;
 
+       /*
+        * The mm_cpumask is located at the end of mm_struct, and is
+        * dynamically sized based on the maximum CPU number this system
+        * can have, taking hotplug into account (nr_cpu_ids).
+        */
+       mm_size = sizeof(struct mm_struct) + cpumask_size();
+
+       mm_cachep = kmem_cache_create_usercopy("mm_struct",
+                       mm_size, ARCH_MIN_MMSTRUCT_ALIGN,
+                       SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
+                       offsetof(struct mm_struct, saved_auxv),
+                       sizeof_field(struct mm_struct, saved_auxv),
+                       NULL);
+}
+
+void __init proc_caches_init(void)
+{
        sighand_cachep = kmem_cache_create("sighand_cache",
                        sizeof(struct sighand_struct), 0,
                        SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_TYPESAFE_BY_RCU|
@@ -3032,19 +3063,6 @@ void __init proc_caches_init(void)
                        SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
                        NULL);
 
-       /*
-        * The mm_cpumask is located at the end of mm_struct, and is
-        * dynamically sized based on the maximum CPU number this system
-        * can have, taking hotplug into account (nr_cpu_ids).
-        */
-       mm_size = sizeof(struct mm_struct) + cpumask_size();
-
-       mm_cachep = kmem_cache_create_usercopy("mm_struct",
-                       mm_size, ARCH_MIN_MMSTRUCT_ALIGN,
-                       SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
-                       offsetof(struct mm_struct, saved_auxv),
-                       sizeof_field(struct mm_struct, saved_auxv),
-                       NULL);
        vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC|SLAB_ACCOUNT);
        mmap_init();
        nsproxy_cache_init();
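Splitting mm_cachep creation into its own __init hook lets boot code set it up independently of the remaining proc caches. The kmem_cache_create_usercopy() call it moves is what matters under CONFIG_HARDENED_USERCOPY: copy_to_user()/copy_from_user() against an mm_struct slab object is only permitted within the whitelisted saved_auxv window given by offsetof()/sizeof_field(). A minimal sketch of the same whitelisting pattern on a made-up struct (all names here are hypothetical):

    #include <linux/slab.h>

    struct foo {
            unsigned long flags;    /* never exposed to user copies */
            char user_blob[64];     /* the only region user copies may touch */
    };

    static struct kmem_cache *foo_cachep;

    static void __init foo_cache_init(void)
    {
            foo_cachep = kmem_cache_create_usercopy("foo",
                            sizeof(struct foo), 0,
                            SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
                            offsetof(struct foo, user_blob),
                            sizeof_field(struct foo, user_blob),
                            NULL);
    }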