Merge branch 'akpm' (patches from Andrew)
[platform/kernel/linux-rpi.git] fs/proc/vmcore.c
index cdbbf81..30a3b66 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -62,46 +62,75 @@ core_param(novmcoredd, vmcoredd_disabled, bool, 0);
 /* Device Dump Size */
 static size_t vmcoredd_orig_sz;
 
-/*
- * Returns > 0 for RAM pages, 0 for non-RAM pages, < 0 on error
- * The called function has to take care of module refcounting.
- */
-static int (*oldmem_pfn_is_ram)(unsigned long pfn);
-
-int register_oldmem_pfn_is_ram(int (*fn)(unsigned long pfn))
+static DECLARE_RWSEM(vmcore_cb_rwsem);
+/* List of registered vmcore callbacks. */
+static LIST_HEAD(vmcore_cb_list);
+/* Whether we had a surprise unregistration of a callback. */
+static bool vmcore_cb_unstable;
+/* Whether the vmcore has been opened once. */
+static bool vmcore_opened;
+
+void register_vmcore_cb(struct vmcore_cb *cb)
 {
-       if (oldmem_pfn_is_ram)
-               return -EBUSY;
-       oldmem_pfn_is_ram = fn;
-       return 0;
+       down_write(&vmcore_cb_rwsem);
+       INIT_LIST_HEAD(&cb->next);
+       list_add_tail(&cb->next, &vmcore_cb_list);
+       /*
+        * Registering a vmcore callback after the vmcore was opened is
+        * very unusual (e.g., manual driver loading).
+        */
+       if (vmcore_opened)
+               pr_warn_once("Unexpected vmcore callback registration\n");
+       up_write(&vmcore_cb_rwsem);
 }
-EXPORT_SYMBOL_GPL(register_oldmem_pfn_is_ram);
+EXPORT_SYMBOL_GPL(register_vmcore_cb);
 
-void unregister_oldmem_pfn_is_ram(void)
+void unregister_vmcore_cb(struct vmcore_cb *cb)
 {
-       oldmem_pfn_is_ram = NULL;
-       wmb();
+       down_write(&vmcore_cb_rwsem);
+       list_del(&cb->next);
+       /*
+        * Unregistering a vmcore callback after the vmcore was opened is
+        * very unusual (e.g., forced driver removal), but we cannot stop
+        * unregistering.
+        */
+       if (vmcore_opened) {
+               pr_warn_once("Unexpected vmcore callback unregistration\n");
+               vmcore_cb_unstable = true;
+       }
+       up_write(&vmcore_cb_rwsem);
 }
-EXPORT_SYMBOL_GPL(unregister_oldmem_pfn_is_ram);
+EXPORT_SYMBOL_GPL(unregister_vmcore_cb);
 
-static int pfn_is_ram(unsigned long pfn)
+static bool pfn_is_ram(unsigned long pfn)
 {
-       int (*fn)(unsigned long pfn);
-       /* pfn is ram unless fn() checks pagetype */
-       int ret = 1;
+       struct vmcore_cb *cb;
+       bool ret = true;
 
-       /*
-        * Ask hypervisor if the pfn is really ram.
-        * A ballooned page contains no data and reading from such a page
-        * will cause high load in the hypervisor.
-        */
-       fn = oldmem_pfn_is_ram;
-       if (fn)
-               ret = fn(pfn);
+       lockdep_assert_held_read(&vmcore_cb_rwsem);
+       if (unlikely(vmcore_cb_unstable))
+               return false;
+
+       list_for_each_entry(cb, &vmcore_cb_list, next) {
+               if (unlikely(!cb->pfn_is_ram))
+                       continue;
+               ret = cb->pfn_is_ram(cb, pfn);
+               if (!ret)
+                       break;
+       }
 
        return ret;
 }
 
+static int open_vmcore(struct inode *inode, struct file *file)
+{
+       down_read(&vmcore_cb_rwsem);
+       vmcore_opened = true;
+       up_read(&vmcore_cb_rwsem);
+
+       return 0;
+}
+
 /* Reads a page from the oldmem device from given offset. */
 ssize_t read_from_oldmem(char *buf, size_t count,
                         u64 *ppos, int userbuf,
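
For reference, the driver side of the new interface would look roughly like the sketch below. struct vmcore_cb (a pfn_is_ram hook plus a list node) and the register/unregister helpers are the ones this series declares in <linux/crash_dump.h>; the "mydrv" names and the hole-range bookkeeping are purely hypothetical and not part of this patch:

/*
 * Hypothetical driver-side sketch (illustration only): report a pfn range
 * that is known to contain no data, so /proc/vmcore reads it back as zeros
 * instead of touching the pages.
 */
#include <linux/module.h>
#include <linux/crash_dump.h>

/* Example-only bookkeeping: one pfn range the device knows is empty. */
static unsigned long mydrv_hole_start_pfn;
static unsigned long mydrv_hole_nr_pages;

static bool mydrv_pfn_is_ram(struct vmcore_cb *cb, unsigned long pfn)
{
        /* Veto only pfns inside our hole; everything else stays "RAM". */
        return pfn < mydrv_hole_start_pfn ||
               pfn >= mydrv_hole_start_pfn + mydrv_hole_nr_pages;
}

static struct vmcore_cb mydrv_vmcore_cb = {
        .pfn_is_ram = mydrv_pfn_is_ram,
};

static int __init mydrv_init(void)
{
        /* The callback is only meaningful in the kdump ("second") kernel. */
        if (is_kdump_kernel())
                register_vmcore_cb(&mydrv_vmcore_cb);
        return 0;
}

static void __exit mydrv_exit(void)
{
        /* Unregistering after /proc/vmcore was opened trips the warning above. */
        if (is_kdump_kernel())
                unregister_vmcore_cb(&mydrv_vmcore_cb);
}

module_init(mydrv_init);
module_exit(mydrv_exit);
MODULE_LICENSE("GPL");

register_vmcore_cb() is exported GPL-only (EXPORT_SYMBOL_GPL above), hence the GPL module license in the sketch.
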
@@ -117,6 +146,7 @@ ssize_t read_from_oldmem(char *buf, size_t count,
        offset = (unsigned long)(*ppos % PAGE_SIZE);
        pfn = (unsigned long)(*ppos / PAGE_SIZE);
 
+       down_read(&vmcore_cb_rwsem);
        do {
                if (count > (PAGE_SIZE - offset))
                        nr_bytes = PAGE_SIZE - offset;
@@ -124,7 +154,7 @@ ssize_t read_from_oldmem(char *buf, size_t count,
                        nr_bytes = count;
 
                /* If pfn is not ram, return zeros for sparse dump files */
-               if (pfn_is_ram(pfn) == 0)
+               if (!pfn_is_ram(pfn))
                        memset(buf, 0, nr_bytes);
                else {
                        if (encrypted)
@@ -136,8 +166,10 @@ ssize_t read_from_oldmem(char *buf, size_t count,
                                tmp = copy_oldmem_page(pfn, buf, nr_bytes,
                                                       offset, userbuf);
 
-                       if (tmp < 0)
+                       if (tmp < 0) {
+                               up_read(&vmcore_cb_rwsem);
                                return tmp;
+                       }
                }
                *ppos += nr_bytes;
                count -= nr_bytes;
@@ -147,6 +179,7 @@ ssize_t read_from_oldmem(char *buf, size_t count,
                offset = 0;
        } while (count);
 
+       up_read(&vmcore_cb_rwsem);
        return read;
 }
 
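
read_from_oldmem() now holds vmcore_cb_rwsem across the whole copy loop and asks pfn_is_ram() once per page. Because each callback receives its own struct vmcore_cb pointer back, a driver with per-device state can embed the callback in its device structure and recover it with container_of(); a minimal sketch, again with hypothetical "mydev" names:

/*
 * Hypothetical per-device variant (illustration only): the vmcore_cb is
 * embedded in the device state, so the callback can consult a per-device
 * bitmap of plugged pfns.
 */
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/crash_dump.h>

struct mydev {
        unsigned long region_start_pfn;   /* first pfn the device manages */
        unsigned long region_nr_pages;    /* size of the managed range */
        unsigned long *plugged_bitmap;    /* one bit per managed pfn */
        struct vmcore_cb vmcore_cb;
};

static bool mydev_vmcore_pfn_is_ram(struct vmcore_cb *cb, unsigned long pfn)
{
        struct mydev *dev = container_of(cb, struct mydev, vmcore_cb);

        /* Pfns outside the device-managed range are none of our business. */
        if (pfn < dev->region_start_pfn ||
            pfn >= dev->region_start_pfn + dev->region_nr_pages)
                return true;
        /* Inside the range, only plugged pfns contain data worth reading. */
        return test_bit(pfn - dev->region_start_pfn, dev->plugged_bitmap);
}

Registration would then pass &dev->vmcore_cb to register_vmcore_cb() from the device's kdump-mode setup path, mirroring the earlier sketch.
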
@@ -537,14 +570,19 @@ static int vmcore_remap_oldmem_pfn(struct vm_area_struct *vma,
                            unsigned long from, unsigned long pfn,
                            unsigned long size, pgprot_t prot)
 {
+       int ret;
+
        /*
-        * Check if oldmem_pfn_is_ram was registered to avoid
-        * looping over all pages without a reason.
+        * Check if a vmcore callback was registered to avoid looping
+        * over all pages without a reason.
         */
-       if (oldmem_pfn_is_ram)
-               return remap_oldmem_pfn_checked(vma, from, pfn, size, prot);
+       down_read(&vmcore_cb_rwsem);
+       if (!list_empty(&vmcore_cb_list) || vmcore_cb_unstable)
+               ret = remap_oldmem_pfn_checked(vma, from, pfn, size, prot);
        else
-               return remap_oldmem_pfn_range(vma, from, pfn, size, prot);
+               ret = remap_oldmem_pfn_range(vma, from, pfn, size, prot);
+       up_read(&vmcore_cb_rwsem);
+       return ret;
 }
 
 static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
@@ -668,6 +706,7 @@ static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
 #endif
 
 static const struct proc_ops vmcore_proc_ops = {
+       .proc_open      = open_vmcore,
        .proc_read      = read_vmcore,
        .proc_lseek     = default_llseek,
        .proc_mmap      = mmap_vmcore,