// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Virtio-mem device driver.
 *
 * Copyright Red Hat, Inc. 2020
 *
 * Author(s): David Hildenbrand <david@redhat.com>
 */

#include <linux/virtio.h>
#include <linux/virtio_mem.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/memory_hotplug.h>
#include <linux/memory.h>
#include <linux/hrtimer.h>
#include <linux/crash_dump.h>
#include <linux/mutex.h>
#include <linux/bitmap.h>
#include <linux/lockdep.h>

#include <acpi/acpi_numa.h>
static bool unplug_online = true;
module_param(unplug_online, bool, 0644);
MODULE_PARM_DESC(unplug_online, "Try to unplug online memory");

static bool force_bbm;
module_param(force_bbm, bool, 0444);
MODULE_PARM_DESC(force_bbm,
		 "Force Big Block Mode. Default is 0 (auto-selection)");

static unsigned long bbm_block_size;
module_param(bbm_block_size, ulong, 0444);
MODULE_PARM_DESC(bbm_block_size,
		 "Big Block size in bytes. Default is 0 (auto-detection).");

static bool bbm_safe_unplug = true;
module_param(bbm_safe_unplug, bool, 0444);
MODULE_PARM_DESC(bbm_safe_unplug,
		 "Use a safe unplug mechanism in BBM, avoiding long/endless loops");
/*
 * virtio-mem currently supports the following modes of operation:
 *
 * * Sub Block Mode (SBM): A Linux memory block spans 2..X subblocks (SB). The
 *   size of a Sub Block (SB) is determined based on the device block size, the
 *   pageblock size, and the maximum allocation granularity of the buddy.
 *   Subblocks within a Linux memory block might either be plugged or unplugged.
 *   Memory is added/removed to Linux MM in Linux memory block granularity.
 *
 * * Big Block Mode (BBM): A Big Block (BB) spans 1..X Linux memory blocks.
 *   Memory is added/removed to Linux MM in Big Block granularity.
 *
 * The mode is determined automatically based on the Linux memory block size
 * and the device block size.
 *
 * User space / core MM (auto onlining) is responsible for onlining added
 * Linux memory blocks - and for selecting a zone. Linux Memory Blocks are
 * always onlined separately, and all memory within a Linux memory block is
 * onlined to the same zone - virtio-mem relies on this behavior.
 */
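/*
 * Worked example (illustrative numbers, not read from any device): with a
 * 128 MiB Linux memory block size and a 2 MiB device block size, a device
 * block is smaller than a memory block, so the driver can operate in SBM
 * and might e.g., end up with a 4 MiB subblock size (device block size,
 * pageblock size and buddy granularity permitting), i.e., 32 subblocks per
 * memory block. If the device block size were 1 GiB instead, a single
 * device block spans multiple Linux memory blocks, so the driver would
 * have to operate in BBM with (at least) 1 GiB big blocks.
 */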
/*
 * State of a Linux memory block in SBM.
 */
enum virtio_mem_sbm_mb_state {
	/* Unplugged, not added to Linux. Can be reused later. */
	VIRTIO_MEM_SBM_MB_UNUSED = 0,
	/* (Partially) plugged, not added to Linux. Error on add_memory(). */
	VIRTIO_MEM_SBM_MB_PLUGGED,
	/* Fully plugged, fully added to Linux, offline. */
	VIRTIO_MEM_SBM_MB_OFFLINE,
	/* Partially plugged, fully added to Linux, offline. */
	VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL,
	/* Fully plugged, fully added to Linux, onlined to a kernel zone. */
	VIRTIO_MEM_SBM_MB_KERNEL,
	/* Partially plugged, fully added to Linux, onlined to a kernel zone. */
	VIRTIO_MEM_SBM_MB_KERNEL_PARTIAL,
	/* Fully plugged, fully added to Linux, onlined to ZONE_MOVABLE. */
	VIRTIO_MEM_SBM_MB_MOVABLE,
	/* Partially plugged, fully added to Linux, onlined to ZONE_MOVABLE. */
	VIRTIO_MEM_SBM_MB_MOVABLE_PARTIAL,
	VIRTIO_MEM_SBM_MB_COUNT
};
/*
 * State of a Big Block (BB) in BBM, covering 1..X Linux memory blocks.
 */
enum virtio_mem_bbm_bb_state {
	/* Unplugged, not added to Linux. Can be reused later. */
	VIRTIO_MEM_BBM_BB_UNUSED = 0,
	/* Plugged, not added to Linux. Error on add_memory(). */
	VIRTIO_MEM_BBM_BB_PLUGGED,
	/* Plugged and added to Linux. */
	VIRTIO_MEM_BBM_BB_ADDED,
	/* All online parts are fake-offline, ready to remove. */
	VIRTIO_MEM_BBM_BB_FAKE_OFFLINE,
	VIRTIO_MEM_BBM_BB_COUNT
};
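/*
 * Typical big block life cycle (a sketch; the transitions are driven by the
 * plug/unplug code below, and error paths can move a block back):
 *
 *   UNUSED -> (plug request succeeded)            -> PLUGGED
 *   PLUGGED -> (add_memory() succeeded)           -> ADDED
 *   ADDED -> (all online memory fake-offlined)    -> FAKE_OFFLINE
 *   FAKE_OFFLINE -> (offlined+removed+unplugged)  -> UNUSED
 */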
struct virtio_mem {
	struct virtio_device *vdev;

	/* We might first have to unplug all memory when starting up. */
	bool unplug_all_required;

	/* Workqueue that processes the plug/unplug requests. */
	struct work_struct wq;
	atomic_t wq_active;
	atomic_t config_changed;

	/* Virtqueue for guest->host requests. */
	struct virtqueue *vq;

	/* Wait for a host response to a guest request. */
	wait_queue_head_t host_resp;

	/* Space for one guest request and the host response. */
	struct virtio_mem_req req;
	struct virtio_mem_resp resp;

	/* The current size of the device. */
	uint64_t plugged_size;
	/* The requested size of the device. */
	uint64_t requested_size;

	/* The device block size (for communicating with the device). */
	uint64_t device_block_size;
	/* The determined node id for all memory of the device. */
	int nid;
	/* Physical start address of the memory region. */
	uint64_t addr;
	/* Maximum region size in bytes. */
	uint64_t region_size;

	/* The parent resource for all memory added via this device. */
	struct resource *parent_resource;
	/*
	 * Copy of "System RAM (virtio_mem)" to be used for
	 * add_memory_driver_managed().
	 */
	const char *resource_name;
	/* Memory group identification. */
	int mgid;

	/*
	 * We don't want to add too much memory if it's not getting onlined,
	 * to avoid running OOM. Besides this threshold, we allow having at
	 * least two offline blocks at a time (whichever is bigger).
	 */
#define VIRTIO_MEM_DEFAULT_OFFLINE_THRESHOLD		(1024 * 1024 * 1024)
	atomic64_t offline_size;
	uint64_t offline_threshold;
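	/*
	 * Worked example (illustrative): with the default 1 GiB threshold
	 * and 128 MiB Linux memory blocks, up to 8 added-but-still-offline
	 * memory blocks are tolerated before the driver stops adding more;
	 * with 2 GiB big blocks, the "at least two offline blocks" rule
	 * dominates instead and permits up to 4 GiB of offline memory.
	 */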
	/* If set, the driver is in SBM, otherwise in BBM. */
	bool in_sbm;

	/* Id of the first memory block of this device. */
	unsigned long first_mb_id;
	/* Id of the last usable memory block of this device. */
	unsigned long last_usable_mb_id;
	/* Id of the next memory block to prepare when needed. */
	unsigned long next_mb_id;

	/* The subblock size. */
	uint64_t sb_size;
	/* The number of subblocks per Linux memory block. */
	uint32_t sbs_per_mb;

	/* Summary of all memory block states. */
	unsigned long mb_count[VIRTIO_MEM_SBM_MB_COUNT];

	/*
	 * One byte state per memory block. Allocated via
	 * vmalloc(). Resized (alloc+copy+free) on demand.
	 *
	 * With 128 MiB memory blocks, we have states for 512
	 * GiB of memory in one 4 KiB page.
	 */
	uint8_t *mb_states;

	/*
	 * Bitmap: one bit per subblock. Allocated similar to
	 * sbm.mb_states.
	 *
	 * A set bit means the corresponding subblock is
	 * plugged, otherwise it's unplugged.
	 *
	 * With 4 MiB subblocks, we manage 128 GiB of memory
	 * in one 4 KiB page.
	 */
	unsigned long *sb_states;
	/* Id of the first big block of this device. */
	unsigned long first_bb_id;
	/* Id of the last usable big block of this device. */
	unsigned long last_usable_bb_id;
	/* Id of the next big block to prepare when needed. */
	unsigned long next_bb_id;

	/* Summary of all big block states. */
	unsigned long bb_count[VIRTIO_MEM_BBM_BB_COUNT];

	/* One byte state per big block. See sbm.mb_states. */
	uint8_t *bb_states;

	/* The block size used for plugging/adding/removing. */
	uint64_t bb_size;

	/*
	 * Mutex that protects the sbm.mb_count, sbm.mb_states,
	 * sbm.sb_states, bbm.bb_count, and bbm.bb_states
	 *
	 * When this lock is held the pointers can't change, ONLINE and
	 * OFFLINE blocks can't change the state and no subblocks will get
	 * plugged/unplugged.
	 */
	struct mutex hotplug_mutex;
	bool hotplug_active;

	/* An error occurred that we cannot handle - stop processing requests. */
	bool broken;

	/* The driver is being removed. */
	spinlock_t removal_lock;
	bool removing;

	/* Timer for retrying to plug/unplug memory. */
	struct hrtimer retry_timer;
	unsigned int retry_timer_ms;
#define VIRTIO_MEM_RETRY_TIMER_MIN_MS		50000
#define VIRTIO_MEM_RETRY_TIMER_MAX_MS		300000

	/* Memory notifier (online/offline events). */
	struct notifier_block memory_notifier;

	/* Next device in the list of virtio-mem devices. */
	struct list_head next;
};
/*
 * We have to share a single online_page callback among all virtio-mem
 * devices. We use RCU to iterate the list in the callback.
 */
static DEFINE_MUTEX(virtio_mem_mutex);
static LIST_HEAD(virtio_mem_devices);

static void virtio_mem_online_page_cb(struct page *page, unsigned int order);
static void virtio_mem_fake_offline_going_offline(unsigned long pfn,
						  unsigned long nr_pages);
static void virtio_mem_fake_offline_cancel_offline(unsigned long pfn,
						   unsigned long nr_pages);
static void virtio_mem_retry(struct virtio_mem *vm);
/*
 * Register a virtio-mem device so it will be considered for the online_page
 * callback.
 */
static int register_virtio_mem_device(struct virtio_mem *vm)
{
	int rc = 0;

	/* First device registers the callback. */
	mutex_lock(&virtio_mem_mutex);
	if (list_empty(&virtio_mem_devices))
		rc = set_online_page_callback(&virtio_mem_online_page_cb);
	if (!rc)
		list_add_rcu(&vm->next, &virtio_mem_devices);
	mutex_unlock(&virtio_mem_mutex);

	return rc;
}

/*
 * Unregister a virtio-mem device so it will no longer be considered for the
 * online_page callback.
 */
static void unregister_virtio_mem_device(struct virtio_mem *vm)
{
	/* Last device unregisters the callback. */
	mutex_lock(&virtio_mem_mutex);
	list_del_rcu(&vm->next);
	if (list_empty(&virtio_mem_devices))
		restore_online_page_callback(&virtio_mem_online_page_cb);
	mutex_unlock(&virtio_mem_mutex);
/*
 * Calculate the memory block id of a given address.
 */
static unsigned long virtio_mem_phys_to_mb_id(unsigned long addr)
{
	return addr / memory_block_size_bytes();
}

/*
 * Calculate the physical start address of a given memory block id.
 */
static unsigned long virtio_mem_mb_id_to_phys(unsigned long mb_id)
{
	return mb_id * memory_block_size_bytes();
}

/*
 * Calculate the big block id of a given address.
 */
static unsigned long virtio_mem_phys_to_bb_id(struct virtio_mem *vm,
					      uint64_t addr)
{
	return addr / vm->bbm.bb_size;
}

/*
 * Calculate the physical start address of a given big block id.
 */
static uint64_t virtio_mem_bb_id_to_phys(struct virtio_mem *vm,
					 unsigned long bb_id)
{
	return bb_id * vm->bbm.bb_size;
}

/*
 * Calculate the subblock id of a given address.
 */
static unsigned long virtio_mem_phys_to_sb_id(struct virtio_mem *vm,
					      unsigned long addr)
{
	const unsigned long mb_id = virtio_mem_phys_to_mb_id(addr);
	const unsigned long mb_addr = virtio_mem_mb_id_to_phys(mb_id);

	return (addr - mb_addr) / vm->sbm.sb_size;
}
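/*
 * Worked example (illustrative numbers): with 128 MiB Linux memory blocks
 * and a 4 MiB subblock size, addr = 0x48400000 (1156 MiB) yields
 * mb_id = 9 (block starting at 0x48000000, i.e., 1152 MiB) and
 * sb_id = 4 MiB / 4 MiB = 1 - the second subblock of the tenth memory
 * block.
 */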
/*
 * Set the state of a big block, taking care of the state counter.
 */
static void virtio_mem_bbm_set_bb_state(struct virtio_mem *vm,
					unsigned long bb_id,
					enum virtio_mem_bbm_bb_state state)
{
	const unsigned long idx = bb_id - vm->bbm.first_bb_id;
	enum virtio_mem_bbm_bb_state old_state;

	old_state = vm->bbm.bb_states[idx];
	vm->bbm.bb_states[idx] = state;

	BUG_ON(vm->bbm.bb_count[old_state] == 0);
	vm->bbm.bb_count[old_state]--;
	vm->bbm.bb_count[state]++;
}

/*
 * Get the state of a big block.
 */
static enum virtio_mem_bbm_bb_state virtio_mem_bbm_get_bb_state(struct virtio_mem *vm,
								unsigned long bb_id)
{
	return vm->bbm.bb_states[bb_id - vm->bbm.first_bb_id];
}

/*
 * Prepare the big block state array for the next big block.
 */
static int virtio_mem_bbm_bb_states_prepare_next_bb(struct virtio_mem *vm)
{
	unsigned long old_bytes = vm->bbm.next_bb_id - vm->bbm.first_bb_id;
	unsigned long new_bytes = old_bytes + 1;
	int old_pages = PFN_UP(old_bytes);
	int new_pages = PFN_UP(new_bytes);
	uint8_t *new_array;

	if (vm->bbm.bb_states && old_pages == new_pages)
		return 0;

	new_array = vzalloc(new_pages * PAGE_SIZE);
	if (!new_array)
		return -ENOMEM;

	mutex_lock(&vm->hotplug_mutex);
	if (vm->bbm.bb_states)
		memcpy(new_array, vm->bbm.bb_states, old_pages * PAGE_SIZE);
	vfree(vm->bbm.bb_states);
	vm->bbm.bb_states = new_array;
	mutex_unlock(&vm->hotplug_mutex);

	return 0;
}
#define virtio_mem_bbm_for_each_bb(_vm, _bb_id, _state) \
	for (_bb_id = _vm->bbm.first_bb_id; \
	     _bb_id < _vm->bbm.next_bb_id && _vm->bbm.bb_count[_state]; \
	     _bb_id++) \
		if (virtio_mem_bbm_get_bb_state(_vm, _bb_id) == _state)

#define virtio_mem_bbm_for_each_bb_rev(_vm, _bb_id, _state) \
	for (_bb_id = _vm->bbm.next_bb_id - 1; \
	     _bb_id >= _vm->bbm.first_bb_id && _vm->bbm.bb_count[_state]; \
	     _bb_id--) \
		if (virtio_mem_bbm_get_bb_state(_vm, _bb_id) == _state)
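/*
 * Usage sketch (illustration only, not part of the driver): walking all
 * plugged-but-not-added big blocks under the hotplug mutex. The state
 * counter in the loop condition lets the walk terminate early once no
 * block in the requested state remains:
 *
 *	unsigned long bb_id;
 *
 *	mutex_lock(&vm->hotplug_mutex);
 *	virtio_mem_bbm_for_each_bb(vm, bb_id, VIRTIO_MEM_BBM_BB_PLUGGED)
 *		pr_info("big block %lu awaits unplug\n", bb_id);
 *	mutex_unlock(&vm->hotplug_mutex);
 */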
/*
 * Set the state of a memory block, taking care of the state counter.
 */
static void virtio_mem_sbm_set_mb_state(struct virtio_mem *vm,
					unsigned long mb_id, uint8_t state)
{
	const unsigned long idx = mb_id - vm->sbm.first_mb_id;
	uint8_t old_state;

	old_state = vm->sbm.mb_states[idx];
	vm->sbm.mb_states[idx] = state;

	BUG_ON(vm->sbm.mb_count[old_state] == 0);
	vm->sbm.mb_count[old_state]--;
	vm->sbm.mb_count[state]++;
}

/*
 * Get the state of a memory block.
 */
static uint8_t virtio_mem_sbm_get_mb_state(struct virtio_mem *vm,
					   unsigned long mb_id)
{
	const unsigned long idx = mb_id - vm->sbm.first_mb_id;

	return vm->sbm.mb_states[idx];
}

/*
 * Prepare the state array for the next memory block.
 */
static int virtio_mem_sbm_mb_states_prepare_next_mb(struct virtio_mem *vm)
{
	int old_pages = PFN_UP(vm->sbm.next_mb_id - vm->sbm.first_mb_id);
	int new_pages = PFN_UP(vm->sbm.next_mb_id - vm->sbm.first_mb_id + 1);
	uint8_t *new_array;

	if (vm->sbm.mb_states && old_pages == new_pages)
		return 0;

	new_array = vzalloc(new_pages * PAGE_SIZE);
	if (!new_array)
		return -ENOMEM;

	mutex_lock(&vm->hotplug_mutex);
	if (vm->sbm.mb_states)
		memcpy(new_array, vm->sbm.mb_states, old_pages * PAGE_SIZE);
	vfree(vm->sbm.mb_states);
	vm->sbm.mb_states = new_array;
	mutex_unlock(&vm->hotplug_mutex);

	return 0;
}
#define virtio_mem_sbm_for_each_mb(_vm, _mb_id, _state) \
	for (_mb_id = _vm->sbm.first_mb_id; \
	     _mb_id < _vm->sbm.next_mb_id && _vm->sbm.mb_count[_state]; \
	     _mb_id++) \
		if (virtio_mem_sbm_get_mb_state(_vm, _mb_id) == _state)

#define virtio_mem_sbm_for_each_mb_rev(_vm, _mb_id, _state) \
	for (_mb_id = _vm->sbm.next_mb_id - 1; \
	     _mb_id >= _vm->sbm.first_mb_id && _vm->sbm.mb_count[_state]; \
	     _mb_id--) \
		if (virtio_mem_sbm_get_mb_state(_vm, _mb_id) == _state)
/*
 * Calculate the bit number in the subblock bitmap for the given subblock
 * inside the given memory block.
 */
static int virtio_mem_sbm_sb_state_bit_nr(struct virtio_mem *vm,
					  unsigned long mb_id, int sb_id)
{
	return (mb_id - vm->sbm.first_mb_id) * vm->sbm.sbs_per_mb + sb_id;
}
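/*
 * Example (illustrative numbers): with 32 subblocks per memory block and
 * first_mb_id = 8, the bitmap lays the blocks out back to back, so
 * mb_id = 10, sb_id = 5 maps to bit (10 - 8) * 32 + 5 = 69.
 */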
/*
 * Mark all selected subblocks plugged.
 *
 * Will not modify the state of the memory block.
 */
static void virtio_mem_sbm_set_sb_plugged(struct virtio_mem *vm,
					  unsigned long mb_id, int sb_id,
					  int count)
{
	const int bit = virtio_mem_sbm_sb_state_bit_nr(vm, mb_id, sb_id);

	__bitmap_set(vm->sbm.sb_states, bit, count);
}

/*
 * Mark all selected subblocks unplugged.
 *
 * Will not modify the state of the memory block.
 */
static void virtio_mem_sbm_set_sb_unplugged(struct virtio_mem *vm,
					    unsigned long mb_id, int sb_id,
					    int count)
{
	const int bit = virtio_mem_sbm_sb_state_bit_nr(vm, mb_id, sb_id);

	__bitmap_clear(vm->sbm.sb_states, bit, count);
}

/*
 * Test if all selected subblocks are plugged.
 */
static bool virtio_mem_sbm_test_sb_plugged(struct virtio_mem *vm,
					   unsigned long mb_id, int sb_id,
					   int count)
{
	const int bit = virtio_mem_sbm_sb_state_bit_nr(vm, mb_id, sb_id);

	if (count == 1)
		return test_bit(bit, vm->sbm.sb_states);

	/* TODO: Helper similar to bitmap_set() */
	return find_next_zero_bit(vm->sbm.sb_states, bit + count, bit) >=
	       bit + count;
}

/*
 * Test if all selected subblocks are unplugged.
 */
static bool virtio_mem_sbm_test_sb_unplugged(struct virtio_mem *vm,
					     unsigned long mb_id, int sb_id,
					     int count)
{
	const int bit = virtio_mem_sbm_sb_state_bit_nr(vm, mb_id, sb_id);

	/* TODO: Helper similar to bitmap_set() */
	return find_next_bit(vm->sbm.sb_states, bit + count, bit) >=
	       bit + count;
}

/*
 * Find the first unplugged subblock. Returns vm->sbm.sbs_per_mb in case there is
 * none.
 */
static int virtio_mem_sbm_first_unplugged_sb(struct virtio_mem *vm,
					     unsigned long mb_id)
{
	const int bit = virtio_mem_sbm_sb_state_bit_nr(vm, mb_id, 0);

	return find_next_zero_bit(vm->sbm.sb_states,
				  bit + vm->sbm.sbs_per_mb, bit) - bit;
}
/*
 * Prepare the subblock bitmap for the next memory block.
 */
static int virtio_mem_sbm_sb_states_prepare_next_mb(struct virtio_mem *vm)
{
	const unsigned long old_nb_mb = vm->sbm.next_mb_id - vm->sbm.first_mb_id;
	const unsigned long old_nb_bits = old_nb_mb * vm->sbm.sbs_per_mb;
	const unsigned long new_nb_bits = (old_nb_mb + 1) * vm->sbm.sbs_per_mb;
	int old_pages = PFN_UP(BITS_TO_LONGS(old_nb_bits) * sizeof(long));
	int new_pages = PFN_UP(BITS_TO_LONGS(new_nb_bits) * sizeof(long));
	unsigned long *new_bitmap, *old_bitmap;

	if (vm->sbm.sb_states && old_pages == new_pages)
		return 0;

	new_bitmap = vzalloc(new_pages * PAGE_SIZE);
	if (!new_bitmap)
		return -ENOMEM;

	mutex_lock(&vm->hotplug_mutex);
	if (vm->sbm.sb_states)
		memcpy(new_bitmap, vm->sbm.sb_states, old_pages * PAGE_SIZE);

	old_bitmap = vm->sbm.sb_states;
	vm->sbm.sb_states = new_bitmap;
	mutex_unlock(&vm->hotplug_mutex);

	vfree(old_bitmap);
	return 0;
}

/*
 * Test if we could add memory without creating too much offline memory -
 * to avoid running OOM if memory is getting onlined deferred.
 */
static bool virtio_mem_could_add_memory(struct virtio_mem *vm, uint64_t size)
{
	if (WARN_ON_ONCE(size > vm->offline_threshold))
		return false;

	return atomic64_read(&vm->offline_size) + size <= vm->offline_threshold;
}
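/*
 * Example (illustrative): with a 1 GiB offline_threshold, 768 MiB already
 * added but still offline and a 128 MiB memory block, 768 MiB + 128 MiB
 * <= 1024 MiB holds, so one more block may be added; a 512 MiB big block
 * would exceed the threshold and has to wait until more memory gets
 * onlined.
 */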
/*
 * Try adding memory to Linux. Will usually only fail if out of memory.
 *
 * Must not be called with the vm->hotplug_mutex held (possible deadlock with
 * onlining code).
 *
 * Will not modify the state of memory blocks in virtio-mem.
 */
static int virtio_mem_add_memory(struct virtio_mem *vm, uint64_t addr,
				 uint64_t size)
{
	int rc;

	/*
	 * When force-unloading the driver and we still have memory added to
	 * Linux, the resource name has to stay.
	 */
	if (!vm->resource_name) {
		vm->resource_name = kstrdup_const("System RAM (virtio_mem)",
						  GFP_KERNEL);
		if (!vm->resource_name)
			return -ENOMEM;
	}

	dev_dbg(&vm->vdev->dev, "adding memory: 0x%llx - 0x%llx\n", addr,
		addr + size - 1);
	/* Memory might get onlined immediately. */
	atomic64_add(size, &vm->offline_size);
	rc = add_memory_driver_managed(vm->mgid, addr, size, vm->resource_name,
				       MHP_MERGE_RESOURCE | MHP_NID_IS_MGID);
	if (rc) {
		atomic64_sub(size, &vm->offline_size);
		dev_warn(&vm->vdev->dev, "adding memory failed: %d\n", rc);
		/*
		 * TODO: Linux MM does not properly clean up yet in all cases
		 * where adding of memory failed - especially on -ENOMEM.
		 */
	}
	return rc;
}

/*
 * See virtio_mem_add_memory(): Try adding a single Linux memory block.
 */
static int virtio_mem_sbm_add_mb(struct virtio_mem *vm, unsigned long mb_id)
{
	const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id);
	const uint64_t size = memory_block_size_bytes();

	return virtio_mem_add_memory(vm, addr, size);
}

/*
 * See virtio_mem_add_memory(): Try adding a big block.
 */
static int virtio_mem_bbm_add_bb(struct virtio_mem *vm, unsigned long bb_id)
{
	const uint64_t addr = virtio_mem_bb_id_to_phys(vm, bb_id);
	const uint64_t size = vm->bbm.bb_size;

	return virtio_mem_add_memory(vm, addr, size);
}
/*
 * Try removing memory from Linux. Will only fail if memory blocks aren't
 * offline.
 *
 * Must not be called with the vm->hotplug_mutex held (possible deadlock with
 * onlining code).
 *
 * Will not modify the state of memory blocks in virtio-mem.
 */
static int virtio_mem_remove_memory(struct virtio_mem *vm, uint64_t addr,
				    uint64_t size)
{
	int rc;

	dev_dbg(&vm->vdev->dev, "removing memory: 0x%llx - 0x%llx\n", addr,
		addr + size - 1);
	rc = remove_memory(addr, size);
	if (!rc) {
		atomic64_sub(size, &vm->offline_size);
		/*
		 * We might have freed up memory we can now unplug, retry
		 * immediately instead of waiting.
		 */
		virtio_mem_retry(vm);
	} else {
		dev_dbg(&vm->vdev->dev, "removing memory failed: %d\n", rc);
	}
	return rc;
}

/*
 * See virtio_mem_remove_memory(): Try removing a single Linux memory block.
 */
static int virtio_mem_sbm_remove_mb(struct virtio_mem *vm, unsigned long mb_id)
{
	const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id);
	const uint64_t size = memory_block_size_bytes();

	return virtio_mem_remove_memory(vm, addr, size);
}
/*
 * Try offlining and removing memory from Linux.
 *
 * Must not be called with the vm->hotplug_mutex held (possible deadlock with
 * onlining code).
 *
 * Will not modify the state of memory blocks in virtio-mem.
 */
static int virtio_mem_offline_and_remove_memory(struct virtio_mem *vm,
						uint64_t addr,
						uint64_t size)
{
	int rc;

	dev_dbg(&vm->vdev->dev,
		"offlining and removing memory: 0x%llx - 0x%llx\n", addr,
		addr + size - 1);

	rc = offline_and_remove_memory(addr, size);
	if (!rc) {
		atomic64_sub(size, &vm->offline_size);
		/*
		 * We might have freed up memory we can now unplug, retry
		 * immediately instead of waiting.
		 */
		virtio_mem_retry(vm);
	} else {
		dev_dbg(&vm->vdev->dev,
			"offlining and removing memory failed: %d\n", rc);
	}
	return rc;
}

/*
 * See virtio_mem_offline_and_remove_memory(): Try offlining and removing
 * a single Linux memory block.
 */
static int virtio_mem_sbm_offline_and_remove_mb(struct virtio_mem *vm,
						unsigned long mb_id)
{
	const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id);
	const uint64_t size = memory_block_size_bytes();

	return virtio_mem_offline_and_remove_memory(vm, addr, size);
}

/*
 * See virtio_mem_offline_and_remove_memory(): Try to offline and remove
 * all Linux memory blocks covered by the big block.
 */
static int virtio_mem_bbm_offline_and_remove_bb(struct virtio_mem *vm,
						unsigned long bb_id)
{
	const uint64_t addr = virtio_mem_bb_id_to_phys(vm, bb_id);
	const uint64_t size = vm->bbm.bb_size;

	return virtio_mem_offline_and_remove_memory(vm, addr, size);
}
/*
 * Trigger the workqueue so the device can perform its magic.
 */
static void virtio_mem_retry(struct virtio_mem *vm)
{
	unsigned long flags;

	spin_lock_irqsave(&vm->removal_lock, flags);
	if (!vm->removing)
		queue_work(system_freezable_wq, &vm->wq);
	spin_unlock_irqrestore(&vm->removal_lock, flags);
}

static int virtio_mem_translate_node_id(struct virtio_mem *vm, uint16_t node_id)
{
	int node = NUMA_NO_NODE;

#if defined(CONFIG_ACPI_NUMA)
	if (virtio_has_feature(vm->vdev, VIRTIO_MEM_F_ACPI_PXM))
		node = pxm_to_node(node_id);
#endif
	return node;
}
/*
 * Test if a virtio-mem device overlaps with the given range. Can be called
 * from (notifier) callbacks lockless.
 */
static bool virtio_mem_overlaps_range(struct virtio_mem *vm, uint64_t start,
				      uint64_t size)
{
	return start < vm->addr + vm->region_size && vm->addr < start + size;
}

/*
 * Test if a virtio-mem device contains a given range. Can be called from
 * (notifier) callbacks lockless.
 */
static bool virtio_mem_contains_range(struct virtio_mem *vm, uint64_t start,
				      uint64_t size)
{
	return start >= vm->addr && start + size <= vm->addr + vm->region_size;
}
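/*
 * Example (illustrative): for a device with addr = 4 GiB and
 * region_size = 2 GiB, i.e., covering [4 GiB, 6 GiB), the range
 * [5 GiB, 7 GiB) overlaps but is not contained; [4.5 GiB, 5 GiB) is both
 * overlapping and contained.
 */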
static int virtio_mem_sbm_notify_going_online(struct virtio_mem *vm,
					      unsigned long mb_id)
{
	switch (virtio_mem_sbm_get_mb_state(vm, mb_id)) {
	case VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL:
	case VIRTIO_MEM_SBM_MB_OFFLINE:
		return NOTIFY_OK;
	default:
		break;
	}
	dev_warn_ratelimited(&vm->vdev->dev,
			     "memory block onlining denied\n");
	return NOTIFY_BAD;
}

static void virtio_mem_sbm_notify_offline(struct virtio_mem *vm,
					  unsigned long mb_id)
{
	switch (virtio_mem_sbm_get_mb_state(vm, mb_id)) {
	case VIRTIO_MEM_SBM_MB_KERNEL_PARTIAL:
	case VIRTIO_MEM_SBM_MB_MOVABLE_PARTIAL:
		virtio_mem_sbm_set_mb_state(vm, mb_id,
					    VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL);
		break;
	case VIRTIO_MEM_SBM_MB_KERNEL:
	case VIRTIO_MEM_SBM_MB_MOVABLE:
		virtio_mem_sbm_set_mb_state(vm, mb_id,
					    VIRTIO_MEM_SBM_MB_OFFLINE);
		break;
	default:
		BUG();
	}
}
static void virtio_mem_sbm_notify_online(struct virtio_mem *vm,
					 unsigned long mb_id,
					 unsigned long start_pfn)
{
	const bool is_movable = page_zonenum(pfn_to_page(start_pfn)) ==
				ZONE_MOVABLE;
	int new_state;

	switch (virtio_mem_sbm_get_mb_state(vm, mb_id)) {
	case VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL:
		new_state = VIRTIO_MEM_SBM_MB_KERNEL_PARTIAL;
		if (is_movable)
			new_state = VIRTIO_MEM_SBM_MB_MOVABLE_PARTIAL;
		break;
	case VIRTIO_MEM_SBM_MB_OFFLINE:
		new_state = VIRTIO_MEM_SBM_MB_KERNEL;
		if (is_movable)
			new_state = VIRTIO_MEM_SBM_MB_MOVABLE;
		break;
	default:
		BUG();
	}
	virtio_mem_sbm_set_mb_state(vm, mb_id, new_state);
}
static void virtio_mem_sbm_notify_going_offline(struct virtio_mem *vm,
						unsigned long mb_id)
{
	const unsigned long nr_pages = PFN_DOWN(vm->sbm.sb_size);
	unsigned long pfn;
	int sb_id;

	for (sb_id = 0; sb_id < vm->sbm.sbs_per_mb; sb_id++) {
		if (virtio_mem_sbm_test_sb_plugged(vm, mb_id, sb_id, 1))
			continue;
		pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
			       sb_id * vm->sbm.sb_size);
		virtio_mem_fake_offline_going_offline(pfn, nr_pages);
	}
}

static void virtio_mem_sbm_notify_cancel_offline(struct virtio_mem *vm,
						 unsigned long mb_id)
{
	const unsigned long nr_pages = PFN_DOWN(vm->sbm.sb_size);
	unsigned long pfn;
	int sb_id;

	for (sb_id = 0; sb_id < vm->sbm.sbs_per_mb; sb_id++) {
		if (virtio_mem_sbm_test_sb_plugged(vm, mb_id, sb_id, 1))
			continue;
		pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
			       sb_id * vm->sbm.sb_size);
		virtio_mem_fake_offline_cancel_offline(pfn, nr_pages);
	}
}
static void virtio_mem_bbm_notify_going_offline(struct virtio_mem *vm,
						unsigned long bb_id,
						unsigned long pfn,
						unsigned long nr_pages)
{
	/*
	 * When marked as "fake-offline", all online memory of this device block
	 * is allocated by us. Otherwise, we don't have any memory allocated.
	 */
	if (virtio_mem_bbm_get_bb_state(vm, bb_id) !=
	    VIRTIO_MEM_BBM_BB_FAKE_OFFLINE)
		return;
	virtio_mem_fake_offline_going_offline(pfn, nr_pages);
}

static void virtio_mem_bbm_notify_cancel_offline(struct virtio_mem *vm,
						 unsigned long bb_id,
						 unsigned long pfn,
						 unsigned long nr_pages)
{
	if (virtio_mem_bbm_get_bb_state(vm, bb_id) !=
	    VIRTIO_MEM_BBM_BB_FAKE_OFFLINE)
		return;
	virtio_mem_fake_offline_cancel_offline(pfn, nr_pages);
}
/*
 * This callback will either be called synchronously from add_memory() or
 * asynchronously (e.g., triggered via user space). We have to be careful
 * with locking when calling add_memory().
 */
static int virtio_mem_memory_notifier_cb(struct notifier_block *nb,
					 unsigned long action, void *arg)
{
	struct virtio_mem *vm = container_of(nb, struct virtio_mem,
					     memory_notifier);
	struct memory_notify *mhp = arg;
	const unsigned long start = PFN_PHYS(mhp->start_pfn);
	const unsigned long size = PFN_PHYS(mhp->nr_pages);
	int rc = NOTIFY_OK;
	unsigned long id;

	if (!virtio_mem_overlaps_range(vm, start, size))
		return NOTIFY_DONE;

	if (vm->in_sbm) {
		id = virtio_mem_phys_to_mb_id(start);
		/*
		 * In SBM, we add memory in separate memory blocks - we expect
		 * it to be onlined/offlined in the same granularity. Bail out
		 * if this ever changes.
		 */
		if (WARN_ON_ONCE(size != memory_block_size_bytes() ||
				 !IS_ALIGNED(start, memory_block_size_bytes())))
			return NOTIFY_BAD;
	} else {
		id = virtio_mem_phys_to_bb_id(vm, start);
		/*
		 * In BBM, we only care about onlining/offlining happening
		 * within a single big block, we don't care about the
		 * actual granularity as we don't track individual Linux
		 * memory blocks.
		 */
		if (WARN_ON_ONCE(id != virtio_mem_phys_to_bb_id(vm, start + size - 1)))
			return NOTIFY_BAD;
	}
	/*
	 * Avoid circular locking lockdep warnings. We lock the mutex
	 * e.g., in MEM_GOING_ONLINE and unlock it in MEM_ONLINE. The
	 * blocking_notifier_call_chain() has its own lock, which gets unlocked
	 * between both notifier calls and will bail out. False positive.
	 */
	lockdep_off();

	switch (action) {
	case MEM_GOING_OFFLINE:
		mutex_lock(&vm->hotplug_mutex);
		if (vm->removing) {
			rc = notifier_from_errno(-EBUSY);
			mutex_unlock(&vm->hotplug_mutex);
			break;
		}
		vm->hotplug_active = true;
		if (vm->in_sbm)
			virtio_mem_sbm_notify_going_offline(vm, id);
		else
			virtio_mem_bbm_notify_going_offline(vm, id,
							    mhp->start_pfn,
							    mhp->nr_pages);
		break;
	case MEM_GOING_ONLINE:
		mutex_lock(&vm->hotplug_mutex);
		if (vm->removing) {
			rc = notifier_from_errno(-EBUSY);
			mutex_unlock(&vm->hotplug_mutex);
			break;
		}
		vm->hotplug_active = true;
		if (vm->in_sbm)
			rc = virtio_mem_sbm_notify_going_online(vm, id);
		break;
	case MEM_OFFLINE:
		if (vm->in_sbm)
			virtio_mem_sbm_notify_offline(vm, id);

		atomic64_add(size, &vm->offline_size);
		/*
		 * Trigger the workqueue. Now that we have some offline memory,
		 * maybe we can handle pending unplug requests.
		 */
		if (!unplug_online)
			virtio_mem_retry(vm);

		vm->hotplug_active = false;
		mutex_unlock(&vm->hotplug_mutex);
		break;
	case MEM_ONLINE:
		if (vm->in_sbm)
			virtio_mem_sbm_notify_online(vm, id, mhp->start_pfn);

		atomic64_sub(size, &vm->offline_size);
		/*
		 * Start adding more memory once we onlined half of our
		 * threshold. Don't trigger if it's possibly due to our action
		 * (e.g., us adding memory which gets onlined immediately from
		 * the core).
		 */
		if (!atomic_read(&vm->wq_active) &&
		    virtio_mem_could_add_memory(vm, vm->offline_threshold / 2))
			virtio_mem_retry(vm);

		vm->hotplug_active = false;
		mutex_unlock(&vm->hotplug_mutex);
		break;
	case MEM_CANCEL_OFFLINE:
		if (!vm->hotplug_active)
			break;
		if (vm->in_sbm)
			virtio_mem_sbm_notify_cancel_offline(vm, id);
		else
			virtio_mem_bbm_notify_cancel_offline(vm, id,
							     mhp->start_pfn,
							     mhp->nr_pages);
		vm->hotplug_active = false;
		mutex_unlock(&vm->hotplug_mutex);
		break;
	case MEM_CANCEL_ONLINE:
		if (!vm->hotplug_active)
			break;
		vm->hotplug_active = false;
		mutex_unlock(&vm->hotplug_mutex);
		break;
	default:
		break;
	}

	lockdep_on();

	return rc;
}
/*
 * Set a range of pages PG_offline. Remember pages that were never onlined
 * (via generic_online_page()) using PageDirty().
 */
static void virtio_mem_set_fake_offline(unsigned long pfn,
					unsigned long nr_pages, bool onlined)
{
	page_offline_begin();
	for (; nr_pages--; pfn++) {
		struct page *page = pfn_to_page(pfn);

		__SetPageOffline(page);
		if (!onlined) {
			SetPageDirty(page);
			/* FIXME: remove after cleanups */
			ClearPageReserved(page);
		}
	}
	page_offline_end();
}

/*
 * Clear PG_offline from a range of pages. If the pages were never onlined,
 * (via generic_online_page()), clear PageDirty().
 */
static void virtio_mem_clear_fake_offline(unsigned long pfn,
					  unsigned long nr_pages, bool onlined)
{
	for (; nr_pages--; pfn++) {
		struct page *page = pfn_to_page(pfn);

		__ClearPageOffline(page);
		if (!onlined)
			ClearPageDirty(page);
	}
}
/*
 * Release a range of fake-offline pages to the buddy, effectively
 * fake-onlining them.
 */
static void virtio_mem_fake_online(unsigned long pfn, unsigned long nr_pages)
{
	const unsigned long max_nr_pages = MAX_ORDER_NR_PAGES;
	unsigned long i;

	/*
	 * We are always called at least with MAX_ORDER_NR_PAGES
	 * granularity/alignment (e.g., the way subblocks work). All pages
	 * inside such a block are alike.
	 */
	for (i = 0; i < nr_pages; i += max_nr_pages) {
		struct page *page = pfn_to_page(pfn + i);

		/*
		 * If the page is PageDirty(), it was kept fake-offline when
		 * onlining the memory block. Otherwise, it was allocated
		 * using alloc_contig_range(). All pages in a subblock are
		 * alike.
		 */
		if (PageDirty(page)) {
			virtio_mem_clear_fake_offline(pfn + i, max_nr_pages,
						      false);
			generic_online_page(page, MAX_ORDER - 1);
		} else {
			virtio_mem_clear_fake_offline(pfn + i, max_nr_pages,
						      true);
			free_contig_range(pfn + i, max_nr_pages);
			adjust_managed_page_count(page, max_nr_pages);
		}
	}
}
/*
 * Try to allocate a range, marking pages fake-offline, effectively
 * fake-offlining them.
 */
static int virtio_mem_fake_offline(unsigned long pfn, unsigned long nr_pages)
{
	const bool is_movable = page_zonenum(pfn_to_page(pfn)) ==
				ZONE_MOVABLE;
	int rc, retry_count;

	/*
	 * TODO: We want an alloc_contig_range() mode that tries to allocate
	 * harder (e.g., dealing with temporarily pinned pages, PCP), especially
	 * with ZONE_MOVABLE. So for now, retry a couple of times with
	 * ZONE_MOVABLE before giving up - because that zone is supposed to give
	 * us guarantees.
	 */
	for (retry_count = 0; retry_count < 5; retry_count++) {
		rc = alloc_contig_range(pfn, pfn + nr_pages, MIGRATE_MOVABLE,
					GFP_KERNEL);
		if (rc == -ENOMEM)
			/* whoops, out of memory */
			return rc;
		else if (rc && !is_movable)
			break;
		else if (rc)
			continue;

		virtio_mem_set_fake_offline(pfn, nr_pages, true);
		adjust_managed_page_count(pfn_to_page(pfn), -nr_pages);
		return 0;
	}

	return -EBUSY;
}
/*
 * Handle fake-offline pages when memory is going offline - such that the
 * pages can be skipped by mm-core when offlining.
 */
static void virtio_mem_fake_offline_going_offline(unsigned long pfn,
						  unsigned long nr_pages)
{
	struct page *page;
	unsigned long i;

	/*
	 * Drop our reference to the pages so the memory can get offlined
	 * and add the unplugged pages to the managed page counters (so
	 * offlining code can correctly subtract them again).
	 */
	adjust_managed_page_count(pfn_to_page(pfn), nr_pages);
	/* Drop our reference to the pages so the memory can get offlined. */
	for (i = 0; i < nr_pages; i++) {
		page = pfn_to_page(pfn + i);
		if (WARN_ON(!page_ref_dec_and_test(page)))
			dump_page(page, "fake-offline page referenced");
	}
}
/*
 * Handle fake-offline pages when memory offlining is canceled - to undo
 * what we did in virtio_mem_fake_offline_going_offline().
 */
static void virtio_mem_fake_offline_cancel_offline(unsigned long pfn,
						   unsigned long nr_pages)
{
	unsigned long i;

	/*
	 * Get the reference we dropped when going offline and subtract the
	 * unplugged pages from the managed page counters.
	 */
	adjust_managed_page_count(pfn_to_page(pfn), -nr_pages);
	for (i = 0; i < nr_pages; i++)
		page_ref_inc(pfn_to_page(pfn + i));
}
static void virtio_mem_online_page_cb(struct page *page, unsigned int order)
{
	const unsigned long addr = page_to_phys(page);
	unsigned long id, sb_id;
	struct virtio_mem *vm;
	bool do_online;

	rcu_read_lock();
	list_for_each_entry_rcu(vm, &virtio_mem_devices, next) {
		if (!virtio_mem_contains_range(vm, addr, PFN_PHYS(1 << order)))
			continue;

		if (vm->in_sbm) {
			/*
			 * We exploit here that subblocks have at least
			 * MAX_ORDER_NR_PAGES size/alignment - so we cannot
			 * cross subblocks within one call.
			 */
			id = virtio_mem_phys_to_mb_id(addr);
			sb_id = virtio_mem_phys_to_sb_id(vm, addr);
			do_online = virtio_mem_sbm_test_sb_plugged(vm, id,
								   sb_id, 1);
		} else {
			/*
			 * If the whole block is marked fake offline, keep
			 * everything that way.
			 */
			id = virtio_mem_phys_to_bb_id(vm, addr);
			do_online = virtio_mem_bbm_get_bb_state(vm, id) !=
				    VIRTIO_MEM_BBM_BB_FAKE_OFFLINE;
		}

		/*
		 * virtio_mem_set_fake_offline() might sleep, we don't need
		 * the device anymore. See virtio_mem_remove() how races
		 * between memory onlining and device removal are handled.
		 */
		rcu_read_unlock();

		if (do_online)
			generic_online_page(page, order);
		else
			virtio_mem_set_fake_offline(PFN_DOWN(addr), 1 << order,
						    false);
		return;
	}
	rcu_read_unlock();

	/* not virtio-mem memory, but e.g., a DIMM. online it */
	generic_online_page(page, order);
}
static uint64_t virtio_mem_send_request(struct virtio_mem *vm,
					const struct virtio_mem_req *req)
{
	struct scatterlist *sgs[2], sg_req, sg_resp;
	unsigned int len;
	int rc;

	/* don't use the request residing on the stack (vaddr) */
	vm->req = *req;

	/* out: buffer for request */
	sg_init_one(&sg_req, &vm->req, sizeof(vm->req));
	sgs[0] = &sg_req;

	/* in: buffer for response */
	sg_init_one(&sg_resp, &vm->resp, sizeof(vm->resp));
	sgs[1] = &sg_resp;

	rc = virtqueue_add_sgs(vm->vq, sgs, 1, 1, vm, GFP_KERNEL);
	if (rc < 0)
		return rc;

	virtqueue_kick(vm->vq);

	/* wait for a response */
	wait_event(vm->host_resp, virtqueue_get_buf(vm->vq, &len));

	return virtio16_to_cpu(vm->vdev, vm->resp.type);
}
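/*
 * Request/response flow in a nutshell (sketch of the code above, with
 * illustrative values): to plug one 2 MiB device block at 0x100000000, the
 * driver fills vm->req with type VIRTIO_MEM_REQ_PLUG, u.plug.addr =
 * 0x100000000 and u.plug.nb_blocks = 1, exposes the request and response
 * buffers via the virtqueue, kicks the host and sleeps on vm->host_resp
 * until virtio_mem_handle_response() wakes it up; vm->resp.type then
 * carries VIRTIO_MEM_RESP_ACK on success.
 */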
static int virtio_mem_send_plug_request(struct virtio_mem *vm, uint64_t addr,
					uint64_t size)
{
	const uint64_t nb_vm_blocks = size / vm->device_block_size;
	const struct virtio_mem_req req = {
		.type = cpu_to_virtio16(vm->vdev, VIRTIO_MEM_REQ_PLUG),
		.u.plug.addr = cpu_to_virtio64(vm->vdev, addr),
		.u.plug.nb_blocks = cpu_to_virtio16(vm->vdev, nb_vm_blocks),
	};
	int rc = -ENOMEM;

	if (atomic_read(&vm->config_changed))
		return -EAGAIN;

	dev_dbg(&vm->vdev->dev, "plugging memory: 0x%llx - 0x%llx\n", addr,
		addr + size - 1);

	switch (virtio_mem_send_request(vm, &req)) {
	case VIRTIO_MEM_RESP_ACK:
		vm->plugged_size += size;
		return 0;
	case VIRTIO_MEM_RESP_NACK:
		rc = -EAGAIN;
		break;
	case VIRTIO_MEM_RESP_BUSY:
		rc = -ETXTBSY;
		break;
	case VIRTIO_MEM_RESP_ERROR:
		rc = -EINVAL;
		break;
	default:
		break;
	}

	dev_dbg(&vm->vdev->dev, "plugging memory failed: %d\n", rc);
	return rc;
}
static int virtio_mem_send_unplug_request(struct virtio_mem *vm, uint64_t addr,
					  uint64_t size)
{
	const uint64_t nb_vm_blocks = size / vm->device_block_size;
	const struct virtio_mem_req req = {
		.type = cpu_to_virtio16(vm->vdev, VIRTIO_MEM_REQ_UNPLUG),
		.u.unplug.addr = cpu_to_virtio64(vm->vdev, addr),
		.u.unplug.nb_blocks = cpu_to_virtio16(vm->vdev, nb_vm_blocks),
	};
	int rc = -ENOMEM;

	if (atomic_read(&vm->config_changed))
		return -EAGAIN;

	dev_dbg(&vm->vdev->dev, "unplugging memory: 0x%llx - 0x%llx\n", addr,
		addr + size - 1);

	switch (virtio_mem_send_request(vm, &req)) {
	case VIRTIO_MEM_RESP_ACK:
		vm->plugged_size -= size;
		return 0;
	case VIRTIO_MEM_RESP_BUSY:
		rc = -ETXTBSY;
		break;
	case VIRTIO_MEM_RESP_ERROR:
		rc = -EINVAL;
		break;
	default:
		break;
	}

	dev_dbg(&vm->vdev->dev, "unplugging memory failed: %d\n", rc);
	return rc;
}
static int virtio_mem_send_unplug_all_request(struct virtio_mem *vm)
{
	const struct virtio_mem_req req = {
		.type = cpu_to_virtio16(vm->vdev, VIRTIO_MEM_REQ_UNPLUG_ALL),
	};
	int rc = -ENOMEM;

	dev_dbg(&vm->vdev->dev, "unplugging all memory");

	switch (virtio_mem_send_request(vm, &req)) {
	case VIRTIO_MEM_RESP_ACK:
		vm->unplug_all_required = false;
		vm->plugged_size = 0;
		/* usable region might have shrunk */
		atomic_set(&vm->config_changed, 1);
		return 0;
	case VIRTIO_MEM_RESP_BUSY:
		rc = -ETXTBSY;
		break;
	default:
		break;
	}

	dev_dbg(&vm->vdev->dev, "unplugging all memory failed: %d\n", rc);
	return rc;
}
/*
 * Plug selected subblocks. Updates the plugged state, but not the state
 * of the memory block.
 */
static int virtio_mem_sbm_plug_sb(struct virtio_mem *vm, unsigned long mb_id,
				  int sb_id, int count)
{
	const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id) +
			      sb_id * vm->sbm.sb_size;
	const uint64_t size = count * vm->sbm.sb_size;
	int rc;

	rc = virtio_mem_send_plug_request(vm, addr, size);
	if (!rc)
		virtio_mem_sbm_set_sb_plugged(vm, mb_id, sb_id, count);
	return rc;
}

/*
 * Unplug selected subblocks. Updates the plugged state, but not the state
 * of the memory block.
 */
static int virtio_mem_sbm_unplug_sb(struct virtio_mem *vm, unsigned long mb_id,
				    int sb_id, int count)
{
	const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id) +
			      sb_id * vm->sbm.sb_size;
	const uint64_t size = count * vm->sbm.sb_size;
	int rc;

	rc = virtio_mem_send_unplug_request(vm, addr, size);
	if (!rc)
		virtio_mem_sbm_set_sb_unplugged(vm, mb_id, sb_id, count);
	return rc;
}
/*
 * Request to unplug a big block.
 *
 * Will not modify the state of the big block.
 */
static int virtio_mem_bbm_unplug_bb(struct virtio_mem *vm, unsigned long bb_id)
{
	const uint64_t addr = virtio_mem_bb_id_to_phys(vm, bb_id);
	const uint64_t size = vm->bbm.bb_size;

	return virtio_mem_send_unplug_request(vm, addr, size);
}

/*
 * Request to plug a big block.
 *
 * Will not modify the state of the big block.
 */
static int virtio_mem_bbm_plug_bb(struct virtio_mem *vm, unsigned long bb_id)
{
	const uint64_t addr = virtio_mem_bb_id_to_phys(vm, bb_id);
	const uint64_t size = vm->bbm.bb_size;

	return virtio_mem_send_plug_request(vm, addr, size);
}
/*
 * Unplug the desired number of plugged subblocks of an offline or not-added
 * memory block. Will fail if any subblock cannot get unplugged (instead of
 * skipping it).
 *
 * Will not modify the state of the memory block.
 *
 * Note: can fail after some subblocks were unplugged.
 */
static int virtio_mem_sbm_unplug_any_sb_raw(struct virtio_mem *vm,
					    unsigned long mb_id, uint64_t *nb_sb)
{
	int sb_id, count;
	int rc;

	sb_id = vm->sbm.sbs_per_mb - 1;
	while (*nb_sb) {
		/* Find the next candidate subblock */
		while (sb_id >= 0 &&
		       virtio_mem_sbm_test_sb_unplugged(vm, mb_id, sb_id, 1))
			sb_id--;
		if (sb_id < 0)
			break;
		/* Try to unplug multiple subblocks at a time */
		count = 1;
		while (count < *nb_sb && sb_id > 0 &&
		       virtio_mem_sbm_test_sb_plugged(vm, mb_id, sb_id - 1, 1)) {
			count++;
			sb_id--;
		}

		rc = virtio_mem_sbm_unplug_sb(vm, mb_id, sb_id, count);
		if (rc)
			return rc;
		*nb_sb -= count;
		sb_id--;
	}

	return 0;
}

/*
 * Unplug all plugged subblocks of an offline or not-added memory block.
 *
 * Will not modify the state of the memory block.
 *
 * Note: can fail after some subblocks were unplugged.
 */
static int virtio_mem_sbm_unplug_mb(struct virtio_mem *vm, unsigned long mb_id)
{
	uint64_t nb_sb = vm->sbm.sbs_per_mb;

	return virtio_mem_sbm_unplug_any_sb_raw(vm, mb_id, &nb_sb);
}
/*
 * Prepare tracking data for the next memory block.
 */
static int virtio_mem_sbm_prepare_next_mb(struct virtio_mem *vm,
					  unsigned long *mb_id)
{
	int rc;

	if (vm->sbm.next_mb_id > vm->sbm.last_usable_mb_id)
		return -ENOSPC;

	/* Resize the state array if required. */
	rc = virtio_mem_sbm_mb_states_prepare_next_mb(vm);
	if (rc)
		return rc;

	/* Resize the subblock bitmap if required. */
	rc = virtio_mem_sbm_sb_states_prepare_next_mb(vm);
	if (rc)
		return rc;

	vm->sbm.mb_count[VIRTIO_MEM_SBM_MB_UNUSED]++;
	*mb_id = vm->sbm.next_mb_id++;
	return 0;
}
/*
 * Try to plug the desired number of subblocks and add the memory block
 * to Linux.
 *
 * Will modify the state of the memory block.
 */
static int virtio_mem_sbm_plug_and_add_mb(struct virtio_mem *vm,
					  unsigned long mb_id, uint64_t *nb_sb)
{
	const int count = min_t(int, *nb_sb, vm->sbm.sbs_per_mb);
	int rc;

	if (WARN_ON_ONCE(!count))
		return -EINVAL;

	/*
	 * Plug the requested number of subblocks before adding it to Linux,
	 * so that onlining will directly online all plugged subblocks.
	 */
	rc = virtio_mem_sbm_plug_sb(vm, mb_id, 0, count);
	if (rc)
		return rc;

	/*
	 * Mark the block properly offline before adding it to Linux,
	 * so the memory notifiers will find the block in the right state.
	 */
	if (count == vm->sbm.sbs_per_mb)
		virtio_mem_sbm_set_mb_state(vm, mb_id,
					    VIRTIO_MEM_SBM_MB_OFFLINE);
	else
		virtio_mem_sbm_set_mb_state(vm, mb_id,
					    VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL);

	/* Add the memory block to Linux - if that fails, try to unplug. */
	rc = virtio_mem_sbm_add_mb(vm, mb_id);
	if (rc) {
		int new_state = VIRTIO_MEM_SBM_MB_UNUSED;

		if (virtio_mem_sbm_unplug_sb(vm, mb_id, 0, count))
			new_state = VIRTIO_MEM_SBM_MB_PLUGGED;
		virtio_mem_sbm_set_mb_state(vm, mb_id, new_state);
		return rc;
	}

	*nb_sb -= count;
	return 0;
}
/*
 * Try to plug the desired number of subblocks of a memory block that
 * is already added to Linux.
 *
 * Will modify the state of the memory block.
 *
 * Note: Can fail after some subblocks were successfully plugged.
 */
static int virtio_mem_sbm_plug_any_sb(struct virtio_mem *vm,
				      unsigned long mb_id, uint64_t *nb_sb)
{
	const int old_state = virtio_mem_sbm_get_mb_state(vm, mb_id);
	unsigned long pfn, nr_pages;
	int sb_id, count;
	int rc;

	if (WARN_ON_ONCE(!*nb_sb))
		return -EINVAL;

	while (*nb_sb) {
		sb_id = virtio_mem_sbm_first_unplugged_sb(vm, mb_id);
		if (sb_id >= vm->sbm.sbs_per_mb)
			break;
		count = 1;
		while (count < *nb_sb &&
		       sb_id + count < vm->sbm.sbs_per_mb &&
		       !virtio_mem_sbm_test_sb_plugged(vm, mb_id, sb_id + count, 1))
			count++;

		rc = virtio_mem_sbm_plug_sb(vm, mb_id, sb_id, count);
		if (rc)
			return rc;
		*nb_sb -= count;
		if (old_state == VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL)
			continue;

		/* fake-online the pages if the memory block is online */
		pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
			       sb_id * vm->sbm.sb_size);
		nr_pages = PFN_DOWN(count * vm->sbm.sb_size);
		virtio_mem_fake_online(pfn, nr_pages);
	}

	if (virtio_mem_sbm_test_sb_plugged(vm, mb_id, 0, vm->sbm.sbs_per_mb))
		virtio_mem_sbm_set_mb_state(vm, mb_id, old_state - 1);

	return 0;
}
static int virtio_mem_sbm_plug_request(struct virtio_mem *vm, uint64_t diff)
{
	const int mb_states[] = {
		VIRTIO_MEM_SBM_MB_KERNEL_PARTIAL,
		VIRTIO_MEM_SBM_MB_MOVABLE_PARTIAL,
		VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL,
	};
	uint64_t nb_sb = diff / vm->sbm.sb_size;
	unsigned long mb_id;
	int rc, i;

	if (!nb_sb)
		return 0;

	/* Don't race with onlining/offlining */
	mutex_lock(&vm->hotplug_mutex);

	for (i = 0; i < ARRAY_SIZE(mb_states); i++) {
		virtio_mem_sbm_for_each_mb(vm, mb_id, mb_states[i]) {
			rc = virtio_mem_sbm_plug_any_sb(vm, mb_id, &nb_sb);
			if (rc || !nb_sb)
				goto out_unlock;
			cond_resched();
		}
	}

	/*
	 * We won't be working on online/offline memory blocks from this point,
	 * so we can't race with memory onlining/offlining. Drop the mutex.
	 */
	mutex_unlock(&vm->hotplug_mutex);

	/* Try to plug and add unused blocks */
	virtio_mem_sbm_for_each_mb(vm, mb_id, VIRTIO_MEM_SBM_MB_UNUSED) {
		if (!virtio_mem_could_add_memory(vm, memory_block_size_bytes()))
			return -ENOSPC;

		rc = virtio_mem_sbm_plug_and_add_mb(vm, mb_id, &nb_sb);
		if (rc || !nb_sb)
			return rc;
		cond_resched();
	}

	/* Try to prepare, plug and add new blocks */
	while (nb_sb) {
		if (!virtio_mem_could_add_memory(vm, memory_block_size_bytes()))
			return -ENOSPC;

		rc = virtio_mem_sbm_prepare_next_mb(vm, &mb_id);
		if (rc)
			return rc;
		rc = virtio_mem_sbm_plug_and_add_mb(vm, mb_id, &nb_sb);
		if (rc)
			return rc;
		cond_resched();
	}

	return 0;
out_unlock:
	mutex_unlock(&vm->hotplug_mutex);
	return rc;
}
/*
 * Plug a big block and add it to Linux.
 *
 * Will modify the state of the big block.
 */
static int virtio_mem_bbm_plug_and_add_bb(struct virtio_mem *vm,
					  unsigned long bb_id)
{
	int rc;

	if (WARN_ON_ONCE(virtio_mem_bbm_get_bb_state(vm, bb_id) !=
			 VIRTIO_MEM_BBM_BB_UNUSED))
		return -EINVAL;

	rc = virtio_mem_bbm_plug_bb(vm, bb_id);
	if (rc)
		return rc;
	virtio_mem_bbm_set_bb_state(vm, bb_id, VIRTIO_MEM_BBM_BB_ADDED);

	rc = virtio_mem_bbm_add_bb(vm, bb_id);
	if (rc) {
		if (!virtio_mem_bbm_unplug_bb(vm, bb_id))
			virtio_mem_bbm_set_bb_state(vm, bb_id,
						    VIRTIO_MEM_BBM_BB_UNUSED);
		else
			/* Retry from the main loop. */
			virtio_mem_bbm_set_bb_state(vm, bb_id,
						    VIRTIO_MEM_BBM_BB_PLUGGED);
		return rc;
	}
	return 0;
}
/*
 * Prepare tracking data for the next big block.
 */
static int virtio_mem_bbm_prepare_next_bb(struct virtio_mem *vm,
					  unsigned long *bb_id)
{
	int rc;

	if (vm->bbm.next_bb_id > vm->bbm.last_usable_bb_id)
		return -ENOSPC;

	/* Resize the big block state array if required. */
	rc = virtio_mem_bbm_bb_states_prepare_next_bb(vm);
	if (rc)
		return rc;

	vm->bbm.bb_count[VIRTIO_MEM_BBM_BB_UNUSED]++;
	*bb_id = vm->bbm.next_bb_id;
	vm->bbm.next_bb_id++;
	return 0;
}
static int virtio_mem_bbm_plug_request(struct virtio_mem *vm, uint64_t diff)
{
	uint64_t nb_bb = diff / vm->bbm.bb_size;
	unsigned long bb_id;
	int rc;

	if (!nb_bb)
		return 0;

	/* Try to plug and add unused big blocks */
	virtio_mem_bbm_for_each_bb(vm, bb_id, VIRTIO_MEM_BBM_BB_UNUSED) {
		if (!virtio_mem_could_add_memory(vm, vm->bbm.bb_size))
			return -ENOSPC;

		rc = virtio_mem_bbm_plug_and_add_bb(vm, bb_id);
		if (!rc)
			nb_bb--;
		if (rc || !nb_bb)
			return rc;
		cond_resched();
	}

	/* Try to prepare, plug and add new big blocks */
	while (nb_bb) {
		if (!virtio_mem_could_add_memory(vm, vm->bbm.bb_size))
			return -ENOSPC;

		rc = virtio_mem_bbm_prepare_next_bb(vm, &bb_id);
		if (rc)
			return rc;
		rc = virtio_mem_bbm_plug_and_add_bb(vm, bb_id);
		if (!rc)
			nb_bb--;
		if (rc)
			return rc;
		cond_resched();
	}

	return 0;
}
/*
 * Try to plug the requested amount of memory.
 */
static int virtio_mem_plug_request(struct virtio_mem *vm, uint64_t diff)
{
	if (vm->in_sbm)
		return virtio_mem_sbm_plug_request(vm, diff);
	return virtio_mem_bbm_plug_request(vm, diff);
}
/*
 * Unplug the desired number of plugged subblocks of an offline memory block.
 * Will fail if any subblock cannot get unplugged (instead of skipping it).
 *
 * Will modify the state of the memory block. Might temporarily drop the
 * hotplug_mutex.
 *
 * Note: Can fail after some subblocks were successfully unplugged.
 */
static int virtio_mem_sbm_unplug_any_sb_offline(struct virtio_mem *vm,
						unsigned long mb_id,
						uint64_t *nb_sb)
{
	int rc;

	rc = virtio_mem_sbm_unplug_any_sb_raw(vm, mb_id, nb_sb);

	/* some subblocks might have been unplugged even on failure */
	if (!virtio_mem_sbm_test_sb_plugged(vm, mb_id, 0, vm->sbm.sbs_per_mb))
		virtio_mem_sbm_set_mb_state(vm, mb_id,
					    VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL);
	if (rc)
		return rc;

	if (virtio_mem_sbm_test_sb_unplugged(vm, mb_id, 0, vm->sbm.sbs_per_mb)) {
		/*
		 * Remove the block from Linux - this should never fail.
		 * Hinder the block from getting onlined by marking it
		 * unplugged. Temporarily drop the mutex, so
		 * any pending GOING_ONLINE requests can be serviced/rejected.
		 */
		virtio_mem_sbm_set_mb_state(vm, mb_id,
					    VIRTIO_MEM_SBM_MB_UNUSED);

		mutex_unlock(&vm->hotplug_mutex);
		rc = virtio_mem_sbm_remove_mb(vm, mb_id);
		BUG_ON(rc);
		mutex_lock(&vm->hotplug_mutex);
	}
	return 0;
}
/*
 * Unplug the given plugged subblocks of an online memory block.
 *
 * Will modify the state of the memory block.
 */
static int virtio_mem_sbm_unplug_sb_online(struct virtio_mem *vm,
					   unsigned long mb_id, int sb_id,
					   int count)
{
	const unsigned long nr_pages = PFN_DOWN(vm->sbm.sb_size) * count;
	const int old_state = virtio_mem_sbm_get_mb_state(vm, mb_id);
	unsigned long start_pfn;
	int rc;

	start_pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
			     sb_id * vm->sbm.sb_size);

	rc = virtio_mem_fake_offline(start_pfn, nr_pages);
	if (rc)
		return rc;

	/* Try to unplug the allocated memory */
	rc = virtio_mem_sbm_unplug_sb(vm, mb_id, sb_id, count);
	if (rc) {
		/* Return the memory to the buddy. */
		virtio_mem_fake_online(start_pfn, nr_pages);
		return rc;
	}

	switch (old_state) {
	case VIRTIO_MEM_SBM_MB_KERNEL:
		virtio_mem_sbm_set_mb_state(vm, mb_id,
					    VIRTIO_MEM_SBM_MB_KERNEL_PARTIAL);
		break;
	case VIRTIO_MEM_SBM_MB_MOVABLE:
		virtio_mem_sbm_set_mb_state(vm, mb_id,
					    VIRTIO_MEM_SBM_MB_MOVABLE_PARTIAL);
		break;
	}
	return 0;
}
/*
 * Unplug the desired number of plugged subblocks of an online memory block.
 * Will skip subblocks that are busy.
 *
 * Will modify the state of the memory block. Might temporarily drop the
 * hotplug_mutex.
 *
 * Note: Can fail after some subblocks were successfully unplugged. Can
 * return 0 even if subblocks were busy and could not get unplugged.
 */
static int virtio_mem_sbm_unplug_any_sb_online(struct virtio_mem *vm,
					       unsigned long mb_id,
					       uint64_t *nb_sb)
{
	int rc, sb_id;

	/* If possible, try to unplug the complete block in one shot. */
	if (*nb_sb >= vm->sbm.sbs_per_mb &&
	    virtio_mem_sbm_test_sb_plugged(vm, mb_id, 0, vm->sbm.sbs_per_mb)) {
		rc = virtio_mem_sbm_unplug_sb_online(vm, mb_id, 0,
						     vm->sbm.sbs_per_mb);
		if (!rc) {
			*nb_sb -= vm->sbm.sbs_per_mb;
			goto unplugged;
		} else if (rc != -EBUSY)
			return rc;
	}

	/* Fallback to single subblocks. */
	for (sb_id = vm->sbm.sbs_per_mb - 1; sb_id >= 0 && *nb_sb; sb_id--) {
		/* Find the next candidate subblock */
		while (sb_id >= 0 &&
		       !virtio_mem_sbm_test_sb_plugged(vm, mb_id, sb_id, 1))
			sb_id--;
		if (sb_id < 0)
			break;

		rc = virtio_mem_sbm_unplug_sb_online(vm, mb_id, sb_id, 1);
		if (rc == -EBUSY)
			continue;
		else if (rc)
			return rc;
		*nb_sb -= 1;
	}

unplugged:
	/*
	 * Once all subblocks of a memory block were unplugged, offline and
	 * remove it. This will usually not fail, as no memory is in use
	 * anymore - however some other notifiers might NACK the request.
	 */
	if (virtio_mem_sbm_test_sb_unplugged(vm, mb_id, 0, vm->sbm.sbs_per_mb)) {
		mutex_unlock(&vm->hotplug_mutex);
		rc = virtio_mem_sbm_offline_and_remove_mb(vm, mb_id);
		mutex_lock(&vm->hotplug_mutex);
		if (!rc)
			virtio_mem_sbm_set_mb_state(vm, mb_id,
						    VIRTIO_MEM_SBM_MB_UNUSED);
	}

	return 0;
}
/*
 * Unplug the desired number of plugged subblocks of a memory block that is
 * already added to Linux. Will skip subblocks of online memory blocks that are
 * busy (by the OS). Will fail if any subblock that's not busy cannot get
 * unplugged.
 *
 * Will modify the state of the memory block. Might temporarily drop the
 * hotplug_mutex.
 *
 * Note: Can fail after some subblocks were successfully unplugged. Can
 * return 0 even if subblocks were busy and could not get unplugged.
 */
static int virtio_mem_sbm_unplug_any_sb(struct virtio_mem *vm,
					unsigned long mb_id,
					uint64_t *nb_sb)
{
	const int old_state = virtio_mem_sbm_get_mb_state(vm, mb_id);

	switch (old_state) {
	case VIRTIO_MEM_SBM_MB_KERNEL_PARTIAL:
	case VIRTIO_MEM_SBM_MB_KERNEL:
	case VIRTIO_MEM_SBM_MB_MOVABLE_PARTIAL:
	case VIRTIO_MEM_SBM_MB_MOVABLE:
		return virtio_mem_sbm_unplug_any_sb_online(vm, mb_id, nb_sb);
	case VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL:
	case VIRTIO_MEM_SBM_MB_OFFLINE:
		return virtio_mem_sbm_unplug_any_sb_offline(vm, mb_id, nb_sb);
	}
	return -EINVAL;
}
static int virtio_mem_sbm_unplug_request(struct virtio_mem *vm, uint64_t diff)
{
	const int mb_states[] = {
		VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL,
		VIRTIO_MEM_SBM_MB_OFFLINE,
		VIRTIO_MEM_SBM_MB_MOVABLE_PARTIAL,
		VIRTIO_MEM_SBM_MB_KERNEL_PARTIAL,
		VIRTIO_MEM_SBM_MB_MOVABLE,
		VIRTIO_MEM_SBM_MB_KERNEL,
	};
	uint64_t nb_sb = diff / vm->sbm.sb_size;
	unsigned long mb_id;
	int rc, i;

	if (!nb_sb)
		return 0;

	/*
	 * We'll drop the mutex a couple of times when it is safe to do so.
	 * This might result in some blocks switching the state (online/offline)
	 * and we could miss them in this run - we will retry again later.
	 */
	mutex_lock(&vm->hotplug_mutex);

	/*
	 * We try unplug from partially plugged blocks first, to try removing
	 * whole memory blocks along with metadata. We prioritize ZONE_MOVABLE
	 * as it's more reliable to unplug memory and remove whole memory
	 * blocks, and we don't want to trigger zone imbalances by
	 * accidentally removing too much kernel memory.
	 */
	for (i = 0; i < ARRAY_SIZE(mb_states); i++) {
		virtio_mem_sbm_for_each_mb_rev(vm, mb_id, mb_states[i]) {
			rc = virtio_mem_sbm_unplug_any_sb(vm, mb_id, &nb_sb);
			if (rc || !nb_sb)
				goto out_unlock;
			mutex_unlock(&vm->hotplug_mutex);
			cond_resched();
			mutex_lock(&vm->hotplug_mutex);
		}
		if (!unplug_online && i == 1) {
			mutex_unlock(&vm->hotplug_mutex);
			return 0;
		}
	}

	mutex_unlock(&vm->hotplug_mutex);
	return nb_sb ? -EBUSY : 0;
out_unlock:
	mutex_unlock(&vm->hotplug_mutex);
	return rc;
}
/*
 * Try to offline and remove a big block from Linux and unplug it. Will fail
 * with -EBUSY if some memory is busy and cannot get unplugged.
 *
 * Will modify the state of the memory block. Might temporarily drop the
 * hotplug_mutex.
 */
static int virtio_mem_bbm_offline_remove_and_unplug_bb(struct virtio_mem *vm,
						       unsigned long bb_id)
{
	const unsigned long start_pfn = PFN_DOWN(virtio_mem_bb_id_to_phys(vm, bb_id));
	const unsigned long nr_pages = PFN_DOWN(vm->bbm.bb_size);
	unsigned long end_pfn = start_pfn + nr_pages;
	unsigned long pfn;
	struct page *page;
	int rc;

	if (WARN_ON_ONCE(virtio_mem_bbm_get_bb_state(vm, bb_id) !=
			 VIRTIO_MEM_BBM_BB_ADDED))
		return -EINVAL;

	if (bbm_safe_unplug) {
		/*
		 * Start by fake-offlining all memory. Once we marked the device
		 * block as fake-offline, all newly onlined memory will
		 * automatically be kept fake-offline. Protect from concurrent
		 * onlining/offlining until we have a consistent state.
		 */
		mutex_lock(&vm->hotplug_mutex);
		virtio_mem_bbm_set_bb_state(vm, bb_id,
					    VIRTIO_MEM_BBM_BB_FAKE_OFFLINE);

		for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
			page = pfn_to_online_page(pfn);
			if (!page)
				continue;

			rc = virtio_mem_fake_offline(pfn, PAGES_PER_SECTION);
			if (rc) {
				end_pfn = pfn;
				goto rollback_safe_unplug;
			}
		}
		mutex_unlock(&vm->hotplug_mutex);
	}

	rc = virtio_mem_bbm_offline_and_remove_bb(vm, bb_id);
	if (rc) {
		if (bbm_safe_unplug) {
			mutex_lock(&vm->hotplug_mutex);
			goto rollback_safe_unplug;
		}
		return rc;
	}

	rc = virtio_mem_bbm_unplug_bb(vm, bb_id);
	if (rc)
		virtio_mem_bbm_set_bb_state(vm, bb_id,
					    VIRTIO_MEM_BBM_BB_PLUGGED);
	else
		virtio_mem_bbm_set_bb_state(vm, bb_id,
					    VIRTIO_MEM_BBM_BB_UNUSED);
	return rc;

rollback_safe_unplug:
	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		page = pfn_to_online_page(pfn);
		if (!page)
			continue;
		virtio_mem_fake_online(pfn, PAGES_PER_SECTION);
	}
	virtio_mem_bbm_set_bb_state(vm, bb_id, VIRTIO_MEM_BBM_BB_ADDED);
	mutex_unlock(&vm->hotplug_mutex);
	return rc;
}
/*
 * Test if a big block is completely offline.
 */
static bool virtio_mem_bbm_bb_is_offline(struct virtio_mem *vm,
					 unsigned long bb_id)
{
	const unsigned long start_pfn = PFN_DOWN(virtio_mem_bb_id_to_phys(vm, bb_id));
	const unsigned long nr_pages = PFN_DOWN(vm->bbm.bb_size);
	unsigned long pfn;

	for (pfn = start_pfn; pfn < start_pfn + nr_pages;
	     pfn += PAGES_PER_SECTION) {
		if (pfn_to_online_page(pfn))
			return false;
	}

	return true;
}

/*
 * Test if a big block is completely onlined to ZONE_MOVABLE (or offline).
 */
static bool virtio_mem_bbm_bb_is_movable(struct virtio_mem *vm,
					 unsigned long bb_id)
{
	const unsigned long start_pfn = PFN_DOWN(virtio_mem_bb_id_to_phys(vm, bb_id));
	const unsigned long nr_pages = PFN_DOWN(vm->bbm.bb_size);
	struct page *page;
	unsigned long pfn;

	for (pfn = start_pfn; pfn < start_pfn + nr_pages;
	     pfn += PAGES_PER_SECTION) {
		page = pfn_to_online_page(pfn);
		if (!page)
			continue;
		if (page_zonenum(page) != ZONE_MOVABLE)
			return false;
	}

	return true;
}
static int virtio_mem_bbm_unplug_request(struct virtio_mem *vm, uint64_t diff)
{
	uint64_t nb_bb = diff / vm->bbm.bb_size;
	unsigned long bb_id;
	int rc, i;

	if (!nb_bb)
		return 0;

	/*
	 * Try to unplug big blocks. Similar to SBM, start with offline
	 * big blocks.
	 */
	for (i = 0; i < 3; i++) {
		virtio_mem_bbm_for_each_bb_rev(vm, bb_id, VIRTIO_MEM_BBM_BB_ADDED) {
			cond_resched();

			/*
			 * As we're holding no locks, these checks are racy,
			 * but we don't care.
			 */
			if (i == 0 && !virtio_mem_bbm_bb_is_offline(vm, bb_id))
				continue;
			if (i == 1 && !virtio_mem_bbm_bb_is_movable(vm, bb_id))
				continue;
			rc = virtio_mem_bbm_offline_remove_and_unplug_bb(vm, bb_id);
			if (rc == -EBUSY)
				continue;
			if (!rc)
				nb_bb--;
			if (rc || !nb_bb)
				return rc;
		}
		if (i == 0 && !unplug_online)
			return 0;
	}

	return nb_bb ? -EBUSY : 0;
}
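/*
 * In short (a sketch of the three passes above): pass 0 only touches big
 * blocks that are already fully offline, pass 1 additionally considers
 * blocks fully onlined to ZONE_MOVABLE, and pass 2 considers any added
 * big block. With unplug_online=0, processing stops after the offline
 * pass.
 */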
/*
 * Try to unplug the requested amount of memory.
 */
static int virtio_mem_unplug_request(struct virtio_mem *vm, uint64_t diff)
{
	if (vm->in_sbm)
		return virtio_mem_sbm_unplug_request(vm, diff);
	return virtio_mem_bbm_unplug_request(vm, diff);
}

/*
 * Try to unplug all blocks that couldn't be unplugged before, for example,
 * because the hypervisor was busy.
 */
static int virtio_mem_unplug_pending_mb(struct virtio_mem *vm)
{
	unsigned long id;
	int rc;

	if (!vm->in_sbm) {
		virtio_mem_bbm_for_each_bb(vm, id,
					   VIRTIO_MEM_BBM_BB_PLUGGED) {
			rc = virtio_mem_bbm_unplug_bb(vm, id);
			if (rc)
				return rc;
			virtio_mem_bbm_set_bb_state(vm, id,
						    VIRTIO_MEM_BBM_BB_UNUSED);
		}
		return 0;
	}

	virtio_mem_sbm_for_each_mb(vm, id, VIRTIO_MEM_SBM_MB_PLUGGED) {
		rc = virtio_mem_sbm_unplug_mb(vm, id);
		if (rc)
			return rc;
		virtio_mem_sbm_set_mb_state(vm, id,
					    VIRTIO_MEM_SBM_MB_UNUSED);
	}

	return 0;
}
/*
 * Update all parts of the config that could have changed.
 */
static void virtio_mem_refresh_config(struct virtio_mem *vm)
{
	const struct range pluggable_range = mhp_get_pluggable_range(true);
	uint64_t new_plugged_size, usable_region_size, end_addr;

	/* the plugged_size is just a reflection of what _we_ did previously */
	virtio_cread_le(vm->vdev, struct virtio_mem_config, plugged_size,
			&new_plugged_size);
	if (WARN_ON_ONCE(new_plugged_size != vm->plugged_size))
		vm->plugged_size = new_plugged_size;

	/* calculate the last usable memory block id */
	virtio_cread_le(vm->vdev, struct virtio_mem_config,
			usable_region_size, &usable_region_size);
	end_addr = min(vm->addr + usable_region_size - 1,
		       pluggable_range.end);

	if (vm->in_sbm) {
		vm->sbm.last_usable_mb_id = virtio_mem_phys_to_mb_id(end_addr);
		if (!IS_ALIGNED(end_addr + 1, memory_block_size_bytes()))
			vm->sbm.last_usable_mb_id--;
	} else {
		vm->bbm.last_usable_bb_id = virtio_mem_phys_to_bb_id(vm,
								     end_addr);
		if (!IS_ALIGNED(end_addr + 1, vm->bbm.bb_size))
			vm->bbm.last_usable_bb_id--;
	}
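	/*
	 * Worked example (illustrative): with vm->addr = 4 GiB, a usable
	 * region size of 1 GiB + 64 MiB and 128 MiB memory blocks,
	 * end_addr + 1 is not memory-block aligned, so the trailing partial
	 * block cannot be used and last_usable_mb_id is decremented to the
	 * last fully covered block.
	 */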
	/*
	 * If we cannot plug any of our device memory (e.g., nothing in the
	 * usable region is addressable), the last usable memory block id will
	 * be smaller than the first usable memory block id. We'll stop
	 * attempting to add memory with -ENOSPC from our main loop.
	 */

	/* see if there is a request to change the size */
	virtio_cread_le(vm->vdev, struct virtio_mem_config, requested_size,
			&vm->requested_size);

	dev_info(&vm->vdev->dev, "plugged size: 0x%llx", vm->plugged_size);
	dev_info(&vm->vdev->dev, "requested size: 0x%llx", vm->requested_size);
}
/*
 * Workqueue function for handling plug/unplug requests and config updates.
 */
static void virtio_mem_run_wq(struct work_struct *work)
{
	struct virtio_mem *vm = container_of(work, struct virtio_mem, wq);
	uint64_t diff;
	int rc;

	hrtimer_cancel(&vm->retry_timer);

	if (vm->broken)
		return;

	atomic_set(&vm->wq_active, 1);
retry:
	rc = 0;

	/* Make sure we start with a clean state if there are leftovers. */
	if (unlikely(vm->unplug_all_required))
		rc = virtio_mem_send_unplug_all_request(vm);

	if (atomic_read(&vm->config_changed)) {
		atomic_set(&vm->config_changed, 0);
		virtio_mem_refresh_config(vm);
	}

	/* Unplug any leftovers from previous runs */
	if (!rc)
		rc = virtio_mem_unplug_pending_mb(vm);

	if (!rc && vm->requested_size != vm->plugged_size) {
		if (vm->requested_size > vm->plugged_size) {
			diff = vm->requested_size - vm->plugged_size;
			rc = virtio_mem_plug_request(vm, diff);
		} else {
			diff = vm->plugged_size - vm->requested_size;
			rc = virtio_mem_unplug_request(vm, diff);
		}
	}

	switch (rc) {
	case 0:
		vm->retry_timer_ms = VIRTIO_MEM_RETRY_TIMER_MIN_MS;
		break;
	case -ENOSPC:
		/*
		 * We cannot add any more memory (alignment, physical limit)
		 * or we have too many offline memory blocks.
		 */
		break;
	case -ETXTBSY:
		/*
		 * The hypervisor cannot process our request right now
		 * (e.g., out of memory, migrating);
		 */
	case -EBUSY:
		/*
		 * We cannot free up any memory to unplug it (all plugged memory
		 * is busy).
		 */
	case -ENOMEM:
		/* Out of memory, try again later. */
		hrtimer_start(&vm->retry_timer, ms_to_ktime(vm->retry_timer_ms),
			      HRTIMER_MODE_REL);
		break;
	case -EAGAIN:
		/* Retry immediately (e.g., the config changed). */
		goto retry;
	default:
		/* Unknown error, mark as broken */
		dev_err(&vm->vdev->dev,
			"unknown error, marking device broken: %d\n", rc);
		vm->broken = true;
	}

	atomic_set(&vm->wq_active, 0);
}
static enum hrtimer_restart virtio_mem_timer_expired(struct hrtimer *timer)
{
	struct virtio_mem *vm = container_of(timer, struct virtio_mem,
					     retry_timer);

	virtio_mem_retry(vm);
	vm->retry_timer_ms = min_t(unsigned int, vm->retry_timer_ms * 2,
				   VIRTIO_MEM_RETRY_TIMER_MAX_MS);
	return HRTIMER_NORESTART;
}
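
/*
 * The retry interval backs off exponentially: every expiry doubles
 * retry_timer_ms, capped at VIRTIO_MEM_RETRY_TIMER_MAX_MS, and a successful
 * workqueue run (rc == 0) resets it to VIRTIO_MEM_RETRY_TIMER_MIN_MS.
 */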
static void virtio_mem_handle_response(struct virtqueue *vq)
{
	struct virtio_mem *vm = vq->vdev->priv;

	wake_up(&vm->host_resp);
}
static int virtio_mem_init_vq(struct virtio_mem *vm)
{
	struct virtqueue *vq;

	vq = virtio_find_single_vq(vm->vdev, virtio_mem_handle_response,
				   "guest-request");
	if (IS_ERR(vq))
		return PTR_ERR(vq);
	vm->vq = vq;

	return 0;
}
static int virtio_mem_init(struct virtio_mem *vm)
{
	const struct range pluggable_range = mhp_get_pluggable_range(true);
	uint64_t sb_size, addr;
	uint16_t node_id;

	if (!vm->vdev->config->get) {
		dev_err(&vm->vdev->dev, "config access disabled\n");
		return -EINVAL;
	}

	/*
	 * We don't want to (un)plug or reuse any memory when in kdump. The
	 * memory is still accessible (but not mapped).
	 */
	if (is_kdump_kernel()) {
		dev_warn(&vm->vdev->dev, "disabled in kdump kernel\n");
		return -EBUSY;
	}
	/* Fetch all properties that can't change. */
	virtio_cread_le(vm->vdev, struct virtio_mem_config, plugged_size,
			&vm->plugged_size);
	virtio_cread_le(vm->vdev, struct virtio_mem_config, block_size,
			&vm->device_block_size);
	virtio_cread_le(vm->vdev, struct virtio_mem_config, node_id,
			&node_id);
	vm->nid = virtio_mem_translate_node_id(vm, node_id);
	virtio_cread_le(vm->vdev, struct virtio_mem_config, addr, &vm->addr);
	virtio_cread_le(vm->vdev, struct virtio_mem_config, region_size,
			&vm->region_size);
	/* Determine the nid for the device based on the lowest address. */
	if (vm->nid == NUMA_NO_NODE)
		vm->nid = memory_add_physaddr_to_nid(vm->addr);

	/* bad device setup - warn only */
	if (!IS_ALIGNED(vm->addr, memory_block_size_bytes()))
		dev_warn(&vm->vdev->dev,
			 "The alignment of the physical start address can make some memory unusable.\n");
	if (!IS_ALIGNED(vm->addr + vm->region_size, memory_block_size_bytes()))
		dev_warn(&vm->vdev->dev,
			 "The alignment of the physical end address can make some memory unusable.\n");
	if (vm->addr < pluggable_range.start ||
	    vm->addr + vm->region_size - 1 > pluggable_range.end)
		dev_warn(&vm->vdev->dev,
			 "Some device memory is not addressable/pluggable. This can make some memory unusable.\n");
	/* Prepare the offline threshold - make sure we can add two blocks. */
	vm->offline_threshold = max_t(uint64_t, 2 * memory_block_size_bytes(),
				      VIRTIO_MEM_DEFAULT_OFFLINE_THRESHOLD);
	/*
	 * We want subblocks to span at least MAX_ORDER_NR_PAGES and
	 * pageblock_nr_pages pages. This:
	 * - Simplifies our page onlining code (virtio_mem_online_page_cb)
	 *   and fake page onlining code (virtio_mem_fake_online).
	 * - Is required for now for alloc_contig_range() to work reliably -
	 *   it doesn't properly handle smaller granularity on ZONE_NORMAL.
	 */
	sb_size = max_t(uint64_t, MAX_ORDER_NR_PAGES,
			pageblock_nr_pages) * PAGE_SIZE;
	sb_size = max_t(uint64_t, vm->device_block_size, sb_size);
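
	/*
	 * Worked example (assuming a typical x86-64 configuration): with
	 * 4 KiB pages, MAX_ORDER_NR_PAGES == 1024 and pageblock_nr_pages ==
	 * 512, so sb_size starts at 4 MiB and only grows if the device block
	 * size exceeds that. With 128 MiB Linux memory blocks this yields
	 * 32 subblocks per memory block in SBM.
	 */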
	if (sb_size < memory_block_size_bytes() && !force_bbm) {
		/* SBM: At least two subblocks per Linux memory block. */
		vm->in_sbm = true;
		vm->sbm.sb_size = sb_size;
		vm->sbm.sbs_per_mb = memory_block_size_bytes() /
				     vm->sbm.sb_size;

		/* Round up to the next full memory block */
		addr = max_t(uint64_t, vm->addr, pluggable_range.start) +
		       memory_block_size_bytes() - 1;
		vm->sbm.first_mb_id = virtio_mem_phys_to_mb_id(addr);
		vm->sbm.next_mb_id = vm->sbm.first_mb_id;
	} else {
		/* BBM: At least one Linux memory block. */
		vm->bbm.bb_size = max_t(uint64_t, vm->device_block_size,
					memory_block_size_bytes());

		if (bbm_block_size) {
			if (!is_power_of_2(bbm_block_size)) {
				dev_warn(&vm->vdev->dev,
					 "bbm_block_size is not a power of 2");
			} else if (bbm_block_size < vm->bbm.bb_size) {
				dev_warn(&vm->vdev->dev,
					 "bbm_block_size is too small");
			} else {
				vm->bbm.bb_size = bbm_block_size;
			}
		}

		/* Round up to the next aligned big block */
		addr = max_t(uint64_t, vm->addr, pluggable_range.start) +
		       vm->bbm.bb_size - 1;
		vm->bbm.first_bb_id = virtio_mem_phys_to_bb_id(vm, addr);
		vm->bbm.next_bb_id = vm->bbm.first_bb_id;

		/* Make sure we can add two big blocks. */
		vm->offline_threshold = max_t(uint64_t, 2 * vm->bbm.bb_size,
					      vm->offline_threshold);
	}
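
	/*
	 * Mode selection example (hypothetical sizes): a 2 MiB device block
	 * size keeps sb_size at 4 MiB, which is smaller than a 128 MiB Linux
	 * memory block, so SBM is used. A 2 GiB device block size forces
	 * sb_size to 2 GiB and therefore BBM with bb_size == 2 GiB.
	 */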
	dev_info(&vm->vdev->dev, "start address: 0x%llx", vm->addr);
	dev_info(&vm->vdev->dev, "region size: 0x%llx", vm->region_size);
	dev_info(&vm->vdev->dev, "device block size: 0x%llx",
		 (unsigned long long)vm->device_block_size);
	dev_info(&vm->vdev->dev, "memory block size: 0x%lx",
		 memory_block_size_bytes());
	if (vm->in_sbm)
		dev_info(&vm->vdev->dev, "subblock size: 0x%llx",
			 (unsigned long long)vm->sbm.sb_size);
	else
		dev_info(&vm->vdev->dev, "big block size: 0x%llx",
			 (unsigned long long)vm->bbm.bb_size);
	if (vm->nid != NUMA_NO_NODE && IS_ENABLED(CONFIG_NUMA))
		dev_info(&vm->vdev->dev, "nid: %d", vm->nid);

	return 0;
}
static int virtio_mem_create_resource(struct virtio_mem *vm)
{
	/*
	 * When force-unloading the driver and removing the device, we
	 * could have a garbage pointer. Duplicate the string.
	 */
	const char *name = kstrdup(dev_name(&vm->vdev->dev), GFP_KERNEL);

	if (!name)
		return -ENOMEM;

	vm->parent_resource = __request_mem_region(vm->addr, vm->region_size,
						   name, IORESOURCE_SYSTEM_RAM);
	if (!vm->parent_resource) {
		kfree(name);
		dev_warn(&vm->vdev->dev, "could not reserve device region\n");
		dev_info(&vm->vdev->dev,
			 "reloading the driver is not supported\n");
		return -EBUSY;
	}

	/* The memory is not actually busy - make add_memory() work. */
	vm->parent_resource->flags &= ~IORESOURCE_BUSY;
	return 0;
}
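
/*
 * Note: add_memory() will request each hot-added range as a busy child of
 * this parent resource; clearing IORESOURCE_BUSY on the parent is what
 * allows those child regions to be inserted.
 */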
static void virtio_mem_delete_resource(struct virtio_mem *vm)
{
	const char *name;

	if (!vm->parent_resource)
		return;

	name = vm->parent_resource->name;
	release_resource(vm->parent_resource);
	kfree(vm->parent_resource);
	kfree(name);
	vm->parent_resource = NULL;
}
static int virtio_mem_range_has_system_ram(struct resource *res, void *arg)
{
	return 1;
}
static bool virtio_mem_has_memory_added(struct virtio_mem *vm)
{
	const unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

	return walk_iomem_res_desc(IORES_DESC_NONE, flags, vm->addr,
				   vm->addr + vm->region_size, NULL,
				   virtio_mem_range_has_system_ram) == 1;
}
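
/*
 * walk_iomem_res_desc() stops at the first nonzero callback return value, so
 * a result of 1 means at least one busy System RAM resource (i.e., added
 * memory) still intersects the device region.
 */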
static int virtio_mem_probe(struct virtio_device *vdev)
{
	struct virtio_mem *vm;
	uint64_t unit_pages;
	int rc;

	BUILD_BUG_ON(sizeof(struct virtio_mem_req) != 24);
	BUILD_BUG_ON(sizeof(struct virtio_mem_resp) != 10);

	vdev->priv = vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm)
		return -ENOMEM;
	init_waitqueue_head(&vm->host_resp);
	vm->vdev = vdev;
	INIT_WORK(&vm->wq, virtio_mem_run_wq);
	mutex_init(&vm->hotplug_mutex);
	INIT_LIST_HEAD(&vm->next);
	spin_lock_init(&vm->removal_lock);
	hrtimer_init(&vm->retry_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	vm->retry_timer.function = virtio_mem_timer_expired;
	vm->retry_timer_ms = VIRTIO_MEM_RETRY_TIMER_MIN_MS;
	/* register the virtqueue */
	rc = virtio_mem_init_vq(vm);
	if (rc)
		goto out_free_vm;

	/* initialize the device by querying the config */
	rc = virtio_mem_init(vm);
	if (rc)
		goto out_del_vq;

	/* create the parent resource for all memory */
	rc = virtio_mem_create_resource(vm);
	if (rc)
		goto out_del_vq;

	/* use a single dynamic memory group to cover the whole memory device */
	if (vm->in_sbm)
		unit_pages = PHYS_PFN(memory_block_size_bytes());
	else
		unit_pages = PHYS_PFN(vm->bbm.bb_size);
	rc = memory_group_register_dynamic(vm->nid, unit_pages);
	if (rc < 0)
		goto out_del_resource;
	vm->mgid = rc;
	/*
	 * If we still have memory plugged, we have to unplug all memory first.
	 * Registering our parent resource makes sure that this memory isn't
	 * actually in use (e.g., trying to reload the driver).
	 */
	if (vm->plugged_size) {
		vm->unplug_all_required = true;
		dev_info(&vm->vdev->dev, "unplugging all memory is required\n");
	}

	/* register callbacks */
	vm->memory_notifier.notifier_call = virtio_mem_memory_notifier_cb;
	rc = register_memory_notifier(&vm->memory_notifier);
	if (rc)
		goto out_unreg_group;
	rc = register_virtio_mem_device(vm);
	if (rc)
		goto out_unreg_mem;

	virtio_device_ready(vdev);

	/* trigger a config update to start processing the requested_size */
	atomic_set(&vm->config_changed, 1);
	queue_work(system_freezable_wq, &vm->wq);

	return 0;
out_unreg_mem:
	unregister_memory_notifier(&vm->memory_notifier);
out_unreg_group:
	memory_group_unregister(vm->mgid);
out_del_resource:
	virtio_mem_delete_resource(vm);
out_del_vq:
	vdev->config->del_vqs(vdev);
out_free_vm:
	kfree(vm);
	vdev->priv = NULL;

	return rc;
}
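
/*
 * The error labels above unwind strictly in reverse order of setup, so each
 * goto target only cleans up what had already been initialized when the
 * failure occurred.
 */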
static void virtio_mem_remove(struct virtio_device *vdev)
{
	struct virtio_mem *vm = vdev->priv;
	unsigned long mb_id;
	int rc;

	/*
	 * Make sure the workqueue won't be triggered anymore and no memory
	 * blocks can be onlined/offlined until we're finished here.
	 */
	mutex_lock(&vm->hotplug_mutex);
	spin_lock_irq(&vm->removal_lock);
	vm->removing = true;
	spin_unlock_irq(&vm->removal_lock);
	mutex_unlock(&vm->hotplug_mutex);

	/* wait until the workqueue stopped */
	cancel_work_sync(&vm->wq);
	hrtimer_cancel(&vm->retry_timer);
	if (vm->in_sbm) {
		/*
		 * After we unregistered our callbacks, user space can online
		 * partially plugged offline blocks. Make sure to remove them.
		 */
		virtio_mem_sbm_for_each_mb(vm, mb_id,
					   VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL) {
			rc = virtio_mem_sbm_remove_mb(vm, mb_id);
			BUG_ON(rc);
			virtio_mem_sbm_set_mb_state(vm, mb_id,
						    VIRTIO_MEM_SBM_MB_UNUSED);
		}
		/*
		 * After we unregistered our callbacks, user space can no
		 * longer offline partially plugged online memory blocks. No
		 * need to worry about them.
		 */
	}
	/* unregister callbacks */
	unregister_virtio_mem_device(vm);
	unregister_memory_notifier(&vm->memory_notifier);

	/*
	 * There is no way we could reliably remove all memory we have added to
	 * the system. And there is no way to stop the driver/device from going
	 * away. Warn at least.
	 */
	if (virtio_mem_has_memory_added(vm)) {
		dev_warn(&vdev->dev, "device still has system memory added\n");
	} else {
		virtio_mem_delete_resource(vm);
		kfree_const(vm->resource_name);
		memory_group_unregister(vm->mgid);
	}
	/* remove all tracking data - no locking needed */
	if (vm->in_sbm) {
		vfree(vm->sbm.mb_states);
		vfree(vm->sbm.sb_states);
	} else {
		vfree(vm->bbm.bb_states);
	}

	/* reset the device and cleanup the queues */
	vdev->config->reset(vdev);
	vdev->config->del_vqs(vdev);

	kfree(vm);
	vdev->priv = NULL;
}
static void virtio_mem_config_changed(struct virtio_device *vdev)
{
	struct virtio_mem *vm = vdev->priv;

	atomic_set(&vm->config_changed, 1);
	virtio_mem_retry(vm);
}
#ifdef CONFIG_PM_SLEEP
static int virtio_mem_freeze(struct virtio_device *vdev)
{
	/*
	 * When restarting the VM, all memory is usually unplugged. Don't
	 * allow to suspend/hibernate.
	 */
	dev_err(&vdev->dev, "save/restore not supported.\n");
	return -EPERM;
}

static int virtio_mem_restore(struct virtio_device *vdev)
{
	return -EPERM;
}
#endif
static unsigned int virtio_mem_features[] = {
#if defined(CONFIG_NUMA) && defined(CONFIG_ACPI_NUMA)
	VIRTIO_MEM_F_ACPI_PXM,
#endif
};
static const struct virtio_device_id virtio_mem_id_table[] = {
	{ VIRTIO_ID_MEM, VIRTIO_DEV_ANY_ID },
	{ 0 },
};
static struct virtio_driver virtio_mem_driver = {
	.feature_table = virtio_mem_features,
	.feature_table_size = ARRAY_SIZE(virtio_mem_features),
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = virtio_mem_id_table,
	.probe = virtio_mem_probe,
	.remove = virtio_mem_remove,
	.config_changed = virtio_mem_config_changed,
#ifdef CONFIG_PM_SLEEP
	.freeze = virtio_mem_freeze,
	.restore = virtio_mem_restore,
#endif
};

module_virtio_driver(virtio_mem_driver);
MODULE_DEVICE_TABLE(virtio, virtio_mem_id_table);
MODULE_AUTHOR("David Hildenbrand <david@redhat.com>");
MODULE_DESCRIPTION("Virtio-mem driver");
MODULE_LICENSE("GPL");
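
/*
 * Usage sketch (not part of the driver; hypervisor options vary by version):
 * a virtio-mem device is typically instantiated via QEMU, e.g.:
 *
 *   qemu-system-x86_64 ... -m 4G,maxmem=20G \
 *       -object memory-backend-ram,id=vmem0,size=16G \
 *       -device virtio-mem-pci,id=vm0,memdev=vmem0,requested-size=1G
 *
 * Resizing is then requested by changing "requested-size" at runtime (e.g.,
 * "qom-set /machine/peripheral/vm0 requested-size 8G" in the QEMU monitor);
 * the guest reacts via virtio_mem_config_changed() above.
 */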