// SPDX-License-Identifier: GPL-2.0-only

#include <linux/efi.h>
#include <linux/memblock.h>
#include <linux/spinlock.h>
#include <asm/unaccepted_memory.h>

/* Protects unaccepted memory bitmap */
static DEFINE_SPINLOCK(unaccepted_memory_lock);

/*
 * accept_memory() -- Consult bitmap and accept the memory if needed.
 *
 * Only memory that is explicitly marked as unaccepted in the bitmap requires
 * an action. All the remaining memory is implicitly accepted and doesn't need
 * acceptance.
 *
 * No need to accept:
 *  - anything if the system has no unaccepted table;
 *  - memory that is below phys_base;
 *  - memory that is above the memory addressable by the bitmap.
 */
void accept_memory(phys_addr_t start, phys_addr_t end)
{
        struct efi_unaccepted_memory *unaccepted;
        unsigned long range_start, range_end;
        unsigned long flags;
        u64 unit_size;

        unaccepted = efi_get_unaccepted_table();
        if (!unaccepted)
                return;

        unit_size = unaccepted->unit_size;

        /*
         * Only care for the part of the range that is represented
         * in the bitmap.
         */
        if (start < unaccepted->phys_base)
                start = unaccepted->phys_base;
        if (end < unaccepted->phys_base)
                return;

        /* Translate to offsets from the beginning of the bitmap */
        start -= unaccepted->phys_base;
        end -= unaccepted->phys_base;

        /*
         * load_unaligned_zeropad() can lead to unwanted loads across page
         * boundaries. The unwanted loads are typically harmless. But, they
         * might land on totally unrelated or even unmapped memory.
         * load_unaligned_zeropad() relies on exception fixup (#PF, #GP and now
         * #VE) to recover from these unwanted loads.
         *
         * But, this approach does not work for unaccepted memory. For TDX, a
         * load from unaccepted memory will not lead to a recoverable exception
         * within the guest. The guest will exit to the VMM where the only
         * recourse is to terminate the guest.
         *
         * There are two parts to fix this issue and comprehensively avoid
         * access to unaccepted memory. Together these ensure that an extra
         * "guard" page is accepted in addition to the memory that needs to be
         * used:
         *
         * 1. Implicitly extend the range_contains_unaccepted_memory(start, end)
         *    checks up to end+unit_size if 'end' is aligned on a unit_size
         *    boundary.
         *
         * 2. Implicitly extend accept_memory(start, end) to end+unit_size if
         *    'end' is aligned on a unit_size boundary. (immediately following
         *    this comment)
         */
        if (!(end % unit_size))
                end += unit_size;

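        /*
         * For illustration: assuming a 2MB unit_size (the value used on
         * x86), a request for offsets [0x0, 0x200000) has a unit-aligned
         * 'end' and is extended to accept [0x0, 0x400000): the requested
         * 2MB plus one 2MB "guard" unit that load_unaligned_zeropad() may
         * stray into.
         */
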
        /* Make sure not to overrun the bitmap */
        if (end > unaccepted->size * unit_size * BITS_PER_BYTE)
                end = unaccepted->size * unit_size * BITS_PER_BYTE;

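        /*
         * Note: 'size' is the size of the bitmap in bytes, so the table can
         * describe at most size * BITS_PER_BYTE units, i.e.
         * size * unit_size * BITS_PER_BYTE bytes starting at phys_base.
         */
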
        range_start = start / unit_size;

        spin_lock_irqsave(&unaccepted_memory_lock, flags);
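        /*
         * Each iteration covers one contiguous run of set bits, i.e. one
         * maximal range of still-unaccepted units: accept it with a single
         * arch call, then clear the bits so the work is not repeated.
         */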
        for_each_set_bitrange_from(range_start, range_end, unaccepted->bitmap,
                                   DIV_ROUND_UP(end, unit_size)) {
                unsigned long phys_start, phys_end;
                unsigned long len = range_end - range_start;

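                /* Translate unit indices back to physical addresses */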
                phys_start = range_start * unit_size + unaccepted->phys_base;
                phys_end = range_end * unit_size + unaccepted->phys_base;

                arch_accept_memory(phys_start, phys_end);
                bitmap_clear(unaccepted->bitmap, range_start, len);
        }
        spin_unlock_irqrestore(&unaccepted_memory_lock, flags);
}

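/*
 * range_contains_unaccepted_memory() -- Check whether any part of the range
 * is still marked as unaccepted in the bitmap.
 *
 * The range is clipped against phys_base and the bitmap bounds, and extended
 * by one guard unit, by the same rules as in accept_memory().
 */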
bool range_contains_unaccepted_memory(phys_addr_t start, phys_addr_t end)
{
        struct efi_unaccepted_memory *unaccepted;
        unsigned long flags;
        bool ret = false;
        u64 unit_size;

        unaccepted = efi_get_unaccepted_table();
        if (!unaccepted)
                return false;

        unit_size = unaccepted->unit_size;

        /*
         * Only care for the part of the range that is represented
         * in the bitmap.
         */
        if (start < unaccepted->phys_base)
                start = unaccepted->phys_base;
        if (end < unaccepted->phys_base)
                return false;

        /* Translate to offsets from the beginning of the bitmap */
        start -= unaccepted->phys_base;
        end -= unaccepted->phys_base;

        /*
         * Also consider the unaccepted state of the *next* page. See fix #1 in
         * the comment on load_unaligned_zeropad() in accept_memory().
         */
        if (!(end % unit_size))
                end += unit_size;

        /* Make sure not to overrun the bitmap */
        if (end > unaccepted->size * unit_size * BITS_PER_BYTE)
                end = unaccepted->size * unit_size * BITS_PER_BYTE;

        spin_lock_irqsave(&unaccepted_memory_lock, flags);
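        /*
         * Scan one unit at a time; a single set bit is enough to report the
         * range as containing unaccepted memory.
         */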
        while (start < end) {
                if (test_bit(start / unit_size, unaccepted->bitmap)) {
                        ret = true;
                        break;
                }

                start += unit_size;
        }
        spin_unlock_irqrestore(&unaccepted_memory_lock, flags);

        return ret;
}