// SPDX-License-Identifier: GPL-2.0
/*
 * Memory subsystem support
 *
 * Written by Matt Tolentino <matthew.e.tolentino@intel.com>
 *            Dave Hansen <haveblue@us.ibm.com>
 *
 * This file provides the necessary infrastructure to represent
 * a SPARSEMEM-memory-model system's physical memory in /sysfs.
 * All arch-independent code that assumes MEMORY_HOTPLUG requires
 * SPARSEMEM should be contained here, or in mm/memory_hotplug.c.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/topology.h>
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/mm.h>
#include <linux/stat.h>
#include <linux/slab.h>

#include <linux/atomic.h>
#include <linux/uaccess.h>
#define MEMORY_CLASS_NAME	"memory"

#define to_memory_block(dev) container_of(dev, struct memory_block, dev)

static int sections_per_block;
static inline unsigned long base_memory_block_id(unsigned long section_nr)
{
	return section_nr / sections_per_block;
}

static inline unsigned long pfn_to_block_id(unsigned long pfn)
{
	return base_memory_block_id(pfn_to_section_nr(pfn));
}

static inline unsigned long phys_to_block_id(unsigned long phys)
{
	return pfn_to_block_id(PFN_DOWN(phys));
}
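
/*
 * Worked example (illustrative; values assume x86-64 defaults, i.e.
 * PAGE_SHIFT == 12, SECTION_SIZE_BITS == 27 and a 128 MiB block size, so
 * sections_per_block == 1): physical address 0x240000000 (9 GiB) is
 * PFN 0x240000, which is section 72 and thus block id 72, exposed as
 * /sys/devices/system/memory/memory72.
 */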
static int memory_subsys_online(struct device *dev);
static int memory_subsys_offline(struct device *dev);

static struct bus_type memory_subsys = {
	.name = MEMORY_CLASS_NAME,
	.dev_name = MEMORY_CLASS_NAME,
	.online = memory_subsys_online,
	.offline = memory_subsys_offline,
};
static BLOCKING_NOTIFIER_HEAD(memory_chain);

int register_memory_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&memory_chain, nb);
}
EXPORT_SYMBOL(register_memory_notifier);

void unregister_memory_notifier(struct notifier_block *nb)
{
	blocking_notifier_chain_unregister(&memory_chain, nb);
}
EXPORT_SYMBOL(unregister_memory_notifier);
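
/*
 * Illustrative sketch (not part of the original file): how a client hooks
 * this notifier chain. The callback and notifier names are hypothetical;
 * a driver would call register_memory_notifier(&example_mem_nb) from its
 * init path and unregister on exit.
 */
static int __maybe_unused example_mem_notify(struct notifier_block *nb,
					     unsigned long action, void *arg)
{
	struct memory_notify *mn = arg;

	if (action == MEM_ONLINE)
		pr_info("onlined %lu pages starting at PFN %#lx\n",
			mn->nr_pages, mn->start_pfn);
	return NOTIFY_OK;
}

static struct notifier_block __maybe_unused example_mem_nb = {
	.notifier_call = example_mem_notify,
};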
static void memory_block_release(struct device *dev)
{
	struct memory_block *mem = to_memory_block(dev);

	kfree(mem);
}

unsigned long __weak memory_block_size_bytes(void)
{
	return MIN_MEMORY_BLOCK_SIZE;
}
EXPORT_SYMBOL_GPL(memory_block_size_bytes);
/*
 * Show the first physical section index (number) of this memory block.
 */
static ssize_t phys_index_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct memory_block *mem = to_memory_block(dev);
	unsigned long phys_index;

	phys_index = mem->start_section_nr / sections_per_block;
	return sprintf(buf, "%08lx\n", phys_index);
}
/*
 * Legacy interface that we cannot remove. Always indicate "removable"
 * with CONFIG_MEMORY_HOTREMOVE - bad heuristic.
 */
static ssize_t removable_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	return sprintf(buf, "%d\n", (int)IS_ENABLED(CONFIG_MEMORY_HOTREMOVE));
}
/*
 * online, offline, going offline, etc.
 */
static ssize_t state_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct memory_block *mem = to_memory_block(dev);
	ssize_t len = 0;

	/*
	 * We can probably put these states in a nice little array
	 * so that they're not open-coded
	 */
	switch (mem->state) {
	case MEM_ONLINE:
		len = sprintf(buf, "online\n");
		break;
	case MEM_OFFLINE:
		len = sprintf(buf, "offline\n");
		break;
	case MEM_GOING_OFFLINE:
		len = sprintf(buf, "going-offline\n");
		break;
	default:
		len = sprintf(buf, "ERROR-UNKNOWN-%ld\n", mem->state);
		WARN_ON(1);
	}

	return len;
}
int memory_notify(unsigned long val, void *v)
{
	return blocking_notifier_call_chain(&memory_chain, val, v);
}
/*
 * MEMORY_HOTPLUG depends on SPARSEMEM in mm/Kconfig, so it is
 * OK to have direct references to sparsemem variables in here.
 */
static int
memory_block_action(unsigned long start_section_nr, unsigned long action,
		    int online_type, int nid)
{
	unsigned long start_pfn;
	unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
	int ret;

	start_pfn = section_nr_to_pfn(start_section_nr);

	switch (action) {
	case MEM_ONLINE:
		ret = online_pages(start_pfn, nr_pages, online_type, nid);
		break;
	case MEM_OFFLINE:
		ret = offline_pages(start_pfn, nr_pages);
		break;
	default:
		WARN(1, KERN_WARNING "%s(%ld, %ld) unknown action: "
		     "%ld\n", __func__, start_section_nr, action, action);
		ret = -EINVAL;
	}

	return ret;
}
static int memory_block_change_state(struct memory_block *mem,
		unsigned long to_state, unsigned long from_state_req)
{
	int ret = 0;

	if (mem->state != from_state_req)
		return -EINVAL;

	if (to_state == MEM_OFFLINE)
		mem->state = MEM_GOING_OFFLINE;

	ret = memory_block_action(mem->start_section_nr, to_state,
				  mem->online_type, mem->nid);

	mem->state = ret ? from_state_req : to_state;

	return ret;
}
/* The device lock serializes operations on memory_subsys_[online|offline] */
static int memory_subsys_online(struct device *dev)
{
	struct memory_block *mem = to_memory_block(dev);
	int ret;

	if (mem->state == MEM_ONLINE)
		return 0;

	/*
	 * If we are called from state_store(), online_type will be
	 * set >= 0. Otherwise we were called from the device online
	 * attribute and need to set the online_type.
	 */
	if (mem->online_type < 0)
		mem->online_type = MMOP_ONLINE;

	ret = memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE);

	/* clear online_type */
	mem->online_type = -1;

	return ret;
}
static int memory_subsys_offline(struct device *dev)
{
	struct memory_block *mem = to_memory_block(dev);

	if (mem->state == MEM_OFFLINE)
		return 0;

	return memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE);
}
static ssize_t state_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct memory_block *mem = to_memory_block(dev);
	int ret, online_type;

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	if (sysfs_streq(buf, "online_kernel"))
		online_type = MMOP_ONLINE_KERNEL;
	else if (sysfs_streq(buf, "online_movable"))
		online_type = MMOP_ONLINE_MOVABLE;
	else if (sysfs_streq(buf, "online"))
		online_type = MMOP_ONLINE;
	else if (sysfs_streq(buf, "offline"))
		online_type = MMOP_OFFLINE;
	else {
		ret = -EINVAL;
		goto err;
	}

	switch (online_type) {
	case MMOP_ONLINE_KERNEL:
	case MMOP_ONLINE_MOVABLE:
	case MMOP_ONLINE:
		/* mem->online_type is protected by device_hotplug_lock */
		mem->online_type = online_type;
		ret = device_online(&mem->dev);
		break;
	case MMOP_OFFLINE:
		ret = device_offline(&mem->dev);
		break;
	default:
		ret = -EINVAL; /* should never happen */
	}

err:
	unlock_device_hotplug();

	if (ret < 0)
		return ret;
	if (ret)
		return -EINVAL;

	return count;
}
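
/*
 * Usage sketch (illustrative only; memory42 is a made-up block number):
 *
 *	# echo online_movable > /sys/devices/system/memory/memory42/state
 *	# cat /sys/devices/system/memory/memory42/state
 *	online
 */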
/*
 * phys_device is a bad name for this.  What I really want
 * is a way to differentiate between memory ranges that
 * are part of physical devices that constitute
 * a complete removable unit or FRU (field-replaceable unit),
 * i.e. do these ranges belong to the same physical device,
 * s.t. if I offline all of these sections I can then
 * remove the physical device?
 */
static ssize_t phys_device_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct memory_block *mem = to_memory_block(dev);

	return sprintf(buf, "%d\n", mem->phys_device);
}
#ifdef CONFIG_MEMORY_HOTREMOVE
static void print_allowed_zone(char *buf, int nid, unsigned long start_pfn,
			       unsigned long nr_pages, int online_type,
			       struct zone *default_zone)
{
	struct zone *zone;

	zone = zone_for_pfn_range(online_type, nid, start_pfn, nr_pages);
	if (zone != default_zone) {
		strcat(buf, " ");
		strcat(buf, zone->name);
	}
}

static ssize_t valid_zones_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct memory_block *mem = to_memory_block(dev);
	unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
	unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
	struct zone *default_zone;
	int nid;

	/*
	 * Check the existing zone. Make sure that we do that only on the
	 * online nodes, otherwise the page_zone is not reliable.
	 */
	if (mem->state == MEM_ONLINE) {
		/*
		 * A block that contains more than one zone can not be
		 * offlined. This can happen e.g. for ZONE_DMA and ZONE_DMA32.
		 */
		default_zone = test_pages_in_a_zone(start_pfn,
						    start_pfn + nr_pages);
		if (!default_zone)
			return sprintf(buf, "none\n");
		strcat(buf, default_zone->name);
		goto out;
	}

	nid = mem->nid;
	default_zone = zone_for_pfn_range(MMOP_ONLINE, nid, start_pfn,
					  nr_pages);
	strcat(buf, default_zone->name);

	print_allowed_zone(buf, nid, start_pfn, nr_pages, MMOP_ONLINE_KERNEL,
			   default_zone);
	print_allowed_zone(buf, nid, start_pfn, nr_pages, MMOP_ONLINE_MOVABLE,
			   default_zone);
out:
	strcat(buf, "\n");

	return strlen(buf);
}
static DEVICE_ATTR_RO(valid_zones);
#endif
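
/*
 * Illustrative output (only present with CONFIG_MEMORY_HOTREMOVE; the
 * block number and zone list are made-up examples, the actual output
 * depends on the block's state and placement):
 *
 *	# cat /sys/devices/system/memory/memory42/valid_zones
 *	Normal Movable
 */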
static DEVICE_ATTR_RO(phys_index);
static DEVICE_ATTR_RW(state);
static DEVICE_ATTR_RO(phys_device);
static DEVICE_ATTR_RO(removable);
/*
 * Show the memory block size (shared by all memory blocks).
 */
static ssize_t block_size_bytes_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%lx\n", memory_block_size_bytes());
}

static DEVICE_ATTR_RO(block_size_bytes);
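
/*
 * Illustrative read from userspace (the value shown assumes 128 MiB
 * blocks, i.e. 0x8000000 bytes; the actual value is architecture- and
 * configuration-dependent):
 *
 *	# cat /sys/devices/system/memory/block_size_bytes
 *	8000000
 */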
/*
 * Memory auto online policy.
 */

static ssize_t auto_online_blocks_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	if (memhp_auto_online)
		return sprintf(buf, "online\n");
	else
		return sprintf(buf, "offline\n");
}

static ssize_t auto_online_blocks_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	if (sysfs_streq(buf, "online"))
		memhp_auto_online = true;
	else if (sysfs_streq(buf, "offline"))
		memhp_auto_online = false;
	else
		return -EINVAL;

	return count;
}

static DEVICE_ATTR_RW(auto_online_blocks);
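
/*
 * Usage sketch (illustrative only): selecting the policy from userspace.
 *
 *	# echo online > /sys/devices/system/memory/auto_online_blocks
 *	# cat /sys/devices/system/memory/auto_online_blocks
 *	online
 */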
/*
 * Some architectures will have custom drivers to do this, and
 * will not need to do it from userspace.  The fake hot-add code
 * as well as ppc64 will do all of their discovery in userspace
 * and will require this interface.
 */
#ifdef CONFIG_ARCH_MEMORY_PROBE
static ssize_t probe_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	u64 phys_addr;
	int nid, ret;
	unsigned long pages_per_block = PAGES_PER_SECTION * sections_per_block;

	ret = kstrtoull(buf, 0, &phys_addr);
	if (ret)
		return ret;

	if (phys_addr & ((pages_per_block << PAGE_SHIFT) - 1))
		return -EINVAL;

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	nid = memory_add_physaddr_to_nid(phys_addr);
	ret = __add_memory(nid, phys_addr,
			   MIN_MEMORY_BLOCK_SIZE * sections_per_block);
	if (ret)
		goto out;

	ret = count;
out:
	unlock_device_hotplug();
	return ret;
}

static DEVICE_ATTR_WO(probe);
#endif
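
/*
 * Usage sketch (illustrative only; the address is a made-up example and
 * must be aligned to the memory block size):
 *
 *	# echo 0x40000000 > /sys/devices/system/memory/probe
 */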
#ifdef CONFIG_MEMORY_FAILURE
/*
 * Support for offlining pages of memory
 */

/* Soft offline a page */
static ssize_t soft_offline_page_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	int ret;
	u64 pfn;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (kstrtoull(buf, 0, &pfn) < 0)
		return -EINVAL;
	pfn >>= PAGE_SHIFT;
	ret = soft_offline_page(pfn, 0);
	return ret == 0 ? count : ret;
}

/* Forcibly offline a page, including killing processes. */
static ssize_t hard_offline_page_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	int ret;
	u64 pfn;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (kstrtoull(buf, 0, &pfn) < 0)
		return -EINVAL;
	pfn >>= PAGE_SHIFT;
	ret = memory_failure(pfn, 0);
	return ret ? ret : count;
}

static DEVICE_ATTR_WO(soft_offline_page);
static DEVICE_ATTR_WO(hard_offline_page);
#endif
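
/*
 * Usage sketch (illustrative only; note that both files take a physical
 * address, which is converted to a PFN above; the address shown is made
 * up):
 *
 *	# echo 0x1f4000000 > /sys/devices/system/memory/soft_offline_page
 */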
/*
 * Note that phys_device is optional.  It is here to allow for
 * differentiation between which *physical* devices each
 * section belongs to...
 */
int __weak arch_get_memory_phys_device(unsigned long start_pfn)
{
	return 0;
}
/* A reference for the returned memory block device is acquired. */
static struct memory_block *find_memory_block_by_id(unsigned long block_id)
{
	struct device *dev;

	dev = subsys_find_device_by_id(&memory_subsys, block_id, NULL);
	return dev ? to_memory_block(dev) : NULL;
}
/*
 * For now, we have a linear search to go find the appropriate
 * memory_block corresponding to a particular phys_index. If
 * this gets to be a real problem, we can always use a radix
 * tree or something here.
 *
 * This could be made generic for all device subsystems.
 */
struct memory_block *find_memory_block(struct mem_section *section)
{
	unsigned long block_id = base_memory_block_id(__section_nr(section));

	return find_memory_block_by_id(block_id);
}
static struct attribute *memory_memblk_attrs[] = {
	&dev_attr_phys_index.attr,
	&dev_attr_state.attr,
	&dev_attr_phys_device.attr,
	&dev_attr_removable.attr,
#ifdef CONFIG_MEMORY_HOTREMOVE
	&dev_attr_valid_zones.attr,
#endif
	NULL
};

static struct attribute_group memory_memblk_attr_group = {
	.attrs = memory_memblk_attrs,
};

static const struct attribute_group *memory_memblk_attr_groups[] = {
	&memory_memblk_attr_group,
	NULL,
};
/*
 * register_memory - Setup a sysfs device for a memory block
 */
static int register_memory(struct memory_block *memory)
{
	int ret;

	memory->dev.bus = &memory_subsys;
	memory->dev.id = memory->start_section_nr / sections_per_block;
	memory->dev.release = memory_block_release;
	memory->dev.groups = memory_memblk_attr_groups;
	memory->dev.offline = memory->state == MEM_OFFLINE;

	ret = device_register(&memory->dev);
	if (ret)
		put_device(&memory->dev);

	return ret;
}
static int init_memory_block(struct memory_block **memory,
			     unsigned long block_id, unsigned long state)
{
	struct memory_block *mem;
	unsigned long start_pfn;
	int ret = 0;

	mem = find_memory_block_by_id(block_id);
	if (mem) {
		put_device(&mem->dev);
		return -EEXIST;
	}
	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	mem->start_section_nr = block_id * sections_per_block;
	mem->state = state;
	start_pfn = section_nr_to_pfn(mem->start_section_nr);
	mem->phys_device = arch_get_memory_phys_device(start_pfn);
	mem->nid = NUMA_NO_NODE;

	ret = register_memory(mem);

	*memory = mem;
	return ret;
}
static int add_memory_block(unsigned long base_section_nr)
{
	int section_count = 0;
	struct memory_block *mem;
	unsigned long nr;

	for (nr = base_section_nr; nr < base_section_nr + sections_per_block;
	     nr++)
		if (present_section_nr(nr))
			section_count++;

	if (section_count == 0)
		return 0;
	return init_memory_block(&mem, base_memory_block_id(base_section_nr),
				 MEM_ONLINE);
}
static void unregister_memory(struct memory_block *memory)
{
	if (WARN_ON_ONCE(memory->dev.bus != &memory_subsys))
		return;

	/* drop the ref. we got via find_memory_block() */
	put_device(&memory->dev);
	device_unregister(&memory->dev);
}
/*
 * Create memory block devices for the given memory area. Start and size
 * have to be aligned to memory block granularity. Memory block devices
 * will be initialized as offline.
 *
 * Called under device_hotplug_lock.
 */
int create_memory_block_devices(unsigned long start, unsigned long size)
{
	const unsigned long start_block_id = pfn_to_block_id(PFN_DOWN(start));
	unsigned long end_block_id = pfn_to_block_id(PFN_DOWN(start + size));
	struct memory_block *mem;
	unsigned long block_id;
	int ret = 0;

	if (WARN_ON_ONCE(!IS_ALIGNED(start, memory_block_size_bytes()) ||
			 !IS_ALIGNED(size, memory_block_size_bytes())))
		return -EINVAL;

	for (block_id = start_block_id; block_id != end_block_id; block_id++) {
		ret = init_memory_block(&mem, block_id, MEM_OFFLINE);
		if (ret)
			break;
	}
	if (ret) {
		/* roll back: unregister the blocks created so far */
		end_block_id = block_id;
		for (block_id = start_block_id; block_id != end_block_id;
		     block_id++) {
			mem = find_memory_block_by_id(block_id);
			if (WARN_ON_ONCE(!mem))
				continue;
			unregister_memory(mem);
		}
	}
	return ret;
}
/*
 * Remove memory block devices for the given memory area. Start and size
 * have to be aligned to memory block granularity. Memory block devices
 * have to be offline.
 *
 * Called under device_hotplug_lock.
 */
void remove_memory_block_devices(unsigned long start, unsigned long size)
{
	const unsigned long start_block_id = pfn_to_block_id(PFN_DOWN(start));
	const unsigned long end_block_id = pfn_to_block_id(PFN_DOWN(start + size));
	struct memory_block *mem;
	unsigned long block_id;

	if (WARN_ON_ONCE(!IS_ALIGNED(start, memory_block_size_bytes()) ||
			 !IS_ALIGNED(size, memory_block_size_bytes())))
		return;

	for (block_id = start_block_id; block_id != end_block_id; block_id++) {
		mem = find_memory_block_by_id(block_id);
		if (WARN_ON_ONCE(!mem))
			continue;
		unregister_memory_block_under_nodes(mem);
		unregister_memory(mem);
	}
}
/* Return true if the memory block is offlined, otherwise return false. */
bool is_memblock_offlined(struct memory_block *mem)
{
	return mem->state == MEM_OFFLINE;
}
static struct attribute *memory_root_attrs[] = {
#ifdef CONFIG_ARCH_MEMORY_PROBE
	&dev_attr_probe.attr,
#endif

#ifdef CONFIG_MEMORY_FAILURE
	&dev_attr_soft_offline_page.attr,
	&dev_attr_hard_offline_page.attr,
#endif

	&dev_attr_block_size_bytes.attr,
	&dev_attr_auto_online_blocks.attr,
	NULL
};

static struct attribute_group memory_root_attr_group = {
	.attrs = memory_root_attrs,
};

static const struct attribute_group *memory_root_attr_groups[] = {
	&memory_root_attr_group,
	NULL,
};
/*
 * Initialize the sysfs support for memory devices. At the time this function
 * is called, we cannot have concurrent creation/deletion of memory block
 * devices, so the device_hotplug_lock is not needed.
 */
void __init memory_dev_init(void)
{
	int ret;
	unsigned long block_sz, nr;

	/* Validate the configured memory block size */
	block_sz = memory_block_size_bytes();
	if (!is_power_of_2(block_sz) || block_sz < MIN_MEMORY_BLOCK_SIZE)
		panic("Memory block size not suitable: 0x%lx\n", block_sz);
	sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;

	ret = subsys_system_register(&memory_subsys, memory_root_attr_groups);
	if (ret)
		panic("%s() failed to register subsystem: %d\n", __func__, ret);

	/*
	 * Create entries for memory sections that were found
	 * during boot and have been initialized.
	 */
	for (nr = 0; nr <= __highest_present_section_nr;
	     nr += sections_per_block) {
		ret = add_memory_block(nr);
		if (ret)
			panic("%s() failed to add memory block: %d\n",
			      __func__, ret);
	}
}
/**
 * walk_memory_blocks - walk through all present memory blocks overlapped
 *			by the range [start, start + size)
 *
 * @start: start address of the memory range
 * @size: size of the memory range
 * @arg: argument passed to func
 * @func: callback for each memory block walked
 *
 * This function walks through all present memory blocks overlapped by the
 * range [start, start + size), calling func on each memory block.
 *
 * In case func() returns an error, walking is aborted and the error is
 * returned.
 */
int walk_memory_blocks(unsigned long start, unsigned long size,
		       void *arg, walk_memory_blocks_func_t func)
{
	const unsigned long start_block_id = phys_to_block_id(start);
	const unsigned long end_block_id = phys_to_block_id(start + size - 1);
	struct memory_block *mem;
	unsigned long block_id;
	int ret = 0;

	if (!size)
		return 0;

	for (block_id = start_block_id; block_id <= end_block_id; block_id++) {
		mem = find_memory_block_by_id(block_id);
		if (!mem)
			continue;

		ret = func(mem, arg);
		put_device(&mem->dev);
		if (ret)
			break;
	}
	return ret;
}
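
/*
 * Illustrative sketch (not part of the original file): a callback that
 * counts online blocks in a range. The names are hypothetical; a caller
 * would do:
 *
 *	unsigned long nr_online = 0;
 *	walk_memory_blocks(start, size, &nr_online, example_count_online);
 */
static int __maybe_unused example_count_online(struct memory_block *mem,
					       void *arg)
{
	unsigned long *nr_online = arg;

	if (mem->state == MEM_ONLINE)
		(*nr_online)++;
	return 0;
}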
struct for_each_memory_block_cb_data {
	walk_memory_blocks_func_t func;
	void *arg;
};

static int for_each_memory_block_cb(struct device *dev, void *data)
{
	struct memory_block *mem = to_memory_block(dev);
	struct for_each_memory_block_cb_data *cb_data = data;

	return cb_data->func(mem, cb_data->arg);
}
/**
 * for_each_memory_block - walk through all present memory blocks
 *
 * @arg: argument passed to func
 * @func: callback for each memory block walked
 *
 * This function walks through all present memory blocks, calling func on
 * each memory block.
 *
 * In case func() returns an error, walking is aborted and the error is
 * returned.
 */
int for_each_memory_block(void *arg, walk_memory_blocks_func_t func)
{
	struct for_each_memory_block_cb_data cb_data = {
		.func = func,
		.arg = arg,
	};

	return bus_for_each_dev(&memory_subsys, NULL, &cb_data,
				for_each_memory_block_cb);
}
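
/*
 * Illustrative use (hypothetical, reusing the example_count_online()
 * sketch above): counting online blocks system-wide rather than within a
 * physical range:
 *
 *	unsigned long nr_online = 0;
 *	for_each_memory_block(&nr_online, example_count_online);
 */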