efi/unaccepted: Fix soft lockups caused by parallel memory acceptance
[platform/kernel/linux-rpi.git] / drivers / firmware / efi / unaccepted_memory.c
index 853f7dc..135278d 100644 (file)
@@ -5,9 +5,17 @@
 #include <linux/spinlock.h>
 #include <asm/unaccepted_memory.h>
 
-/* Protects unaccepted memory bitmap */
+/* Protects unaccepted memory bitmap and accepting_list */
 static DEFINE_SPINLOCK(unaccepted_memory_lock);
 
+struct accept_range {
+       struct list_head list;
+       unsigned long start;
+       unsigned long end;
+};
+
+static LIST_HEAD(accepting_list);
+
 /*
  * accept_memory() -- Consult bitmap and accept the memory if needed.
  *
@@ -24,6 +32,7 @@ void accept_memory(phys_addr_t start, phys_addr_t end)
 {
        struct efi_unaccepted_memory *unaccepted;
        unsigned long range_start, range_end;
+       struct accept_range range, *entry;
        unsigned long flags;
        u64 unit_size;
 
@@ -78,20 +87,67 @@ void accept_memory(phys_addr_t start, phys_addr_t end)
        if (end > unaccepted->size * unit_size * BITS_PER_BYTE)
                end = unaccepted->size * unit_size * BITS_PER_BYTE;
 
-       range_start = start / unit_size;
-
+       range.start = start / unit_size;
+       range.end = DIV_ROUND_UP(end, unit_size);
+retry:
        spin_lock_irqsave(&unaccepted_memory_lock, flags);
+
+       /*
+        * Check if anybody else is working on accepting the same range of memory.
+        *
+        * The check is done with unit_size granularity. It is crucial to catch
+        * all accept requests to the same unit_size block, even if they don't
+        * overlap on physical address level.
+        */
+       list_for_each_entry(entry, &accepting_list, list) {
+               if (entry->end < range.start)
+                       continue;
+               if (entry->start >= range.end)
+                       continue;
+
+               /*
+                * Somebody else is accepting the range, or at least part of it.
+                *
+                * Drop the lock and retry until it is complete.
+                */
+               spin_unlock_irqrestore(&unaccepted_memory_lock, flags);
+               goto retry;
+       }
+
+       /*
+        * Register that the range is about to be accepted.
+        * Make sure nobody else will accept it.
+        */
+       list_add(&range.list, &accepting_list);
+
+       range_start = range.start;
        for_each_set_bitrange_from(range_start, range_end, unaccepted->bitmap,
-                                  DIV_ROUND_UP(end, unit_size)) {
+                                  range.end) {
                unsigned long phys_start, phys_end;
                unsigned long len = range_end - range_start;
 
                phys_start = range_start * unit_size + unaccepted->phys_base;
                phys_end = range_end * unit_size + unaccepted->phys_base;
 
+               /*
+                * Keep interrupts disabled until the accept operation is
+                * complete in order to prevent deadlocks.
+                *
+                * Enabling interrupts before calling arch_accept_memory()
+                * creates an opportunity for an interrupt handler to request
+                * acceptance for the same memory. The handler will continuously
+                * spin with interrupts disabled, preventing other tasks from
+                * making progress with the acceptance process.
+                */
+               spin_unlock(&unaccepted_memory_lock);
+
                arch_accept_memory(phys_start, phys_end);
+
+               spin_lock(&unaccepted_memory_lock);
                bitmap_clear(unaccepted->bitmap, range_start, len);
        }
+
+       list_del(&range.list);
        spin_unlock_irqrestore(&unaccepted_memory_lock, flags);
 }