// SPDX-License-Identifier: GPL-2.0
/*
 * cacheinfo support - processor cache information via sysfs
 *
 * Based on arch/x86/kernel/cpu/intel_cacheinfo.c
 * Author: Sudeep Holla <sudeep.holla@arm.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/cacheinfo.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/of_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/sysfs.h>

/* pointer to per cpu cacheinfo */
static DEFINE_PER_CPU(struct cpu_cacheinfo, ci_cpu_cacheinfo);
#define ci_cacheinfo(cpu)	(&per_cpu(ci_cpu_cacheinfo, cpu))
#define cache_leaves(cpu)	(ci_cacheinfo(cpu)->num_leaves)
#define per_cpu_cacheinfo(cpu)	(ci_cacheinfo(cpu)->info_list)
#define per_cpu_cacheinfo_idx(cpu, idx)		\
				(per_cpu_cacheinfo(cpu) + (idx))

/* Set if no cache information is found in DT/ACPI. */
static bool use_arch_info;

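/*
 * Accessor for a CPU's cpu_cacheinfo. The per-CPU structure itself always
 * exists; its info_list stays NULL until the leaves have been allocated,
 * so callers must check it before dereferencing leaves.
 */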
struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu)
{
	return ci_cacheinfo(cpu);
}

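/*
 * Two leaves count as shared when both carry a firmware-assigned cache ID
 * and the IDs match; otherwise fall back to comparing the DT/ACPI fw_token.
 * Without DT/ACPI information (or when using arch-provided info), assume
 * private level 1 caches and system-wide shared caches at all other levels.
 */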
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
					   struct cacheinfo *sib_leaf)
{
	/*
	 * For non-DT/ACPI systems, assume unique level 1 caches,
	 * system-wide shared caches for all other levels.
	 */
	if (!(IS_ENABLED(CONFIG_OF) || IS_ENABLED(CONFIG_ACPI)) ||
	    use_arch_info)
		return (this_leaf->level != 1) && (sib_leaf->level != 1);

	if ((sib_leaf->attributes & CACHE_ID) &&
	    (this_leaf->attributes & CACHE_ID))
		return sib_leaf->id == this_leaf->id;

	return sib_leaf->fw_token == this_leaf->fw_token;
}

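/*
 * The last-level cache leaf is "valid" once firmware has tagged it with
 * either a cache ID or a DT/ACPI fw_token, i.e. sharing can be evaluated.
 */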
bool last_level_cache_is_valid(unsigned int cpu)
{
	struct cacheinfo *llc;

	if (!cache_leaves(cpu))
		return false;

	llc = per_cpu_cacheinfo_idx(cpu, cache_leaves(cpu) - 1);

	return (llc->attributes & CACHE_ID) || !!llc->fw_token;
}

bool last_level_cache_is_shared(unsigned int cpu_x, unsigned int cpu_y)
{
	struct cacheinfo *llc_x, *llc_y;

	if (!last_level_cache_is_valid(cpu_x) ||
	    !last_level_cache_is_valid(cpu_y))
		return false;

	llc_x = per_cpu_cacheinfo_idx(cpu_x, cache_leaves(cpu_x) - 1);
	llc_y = per_cpu_cacheinfo_idx(cpu_y, cache_leaves(cpu_y) - 1);

	return cache_leaves_are_shared(llc_x, llc_y);
}

#ifdef CONFIG_OF

static bool of_check_cache_nodes(struct device_node *np);

/* OF properties to query for a given cache type */
struct cache_type_info {
	const char *size_prop;
	const char *line_size_props[2];
	const char *nr_sets_prop;
};

static const struct cache_type_info cache_type_info[] = {
	{
		.size_prop       = "cache-size",
		.line_size_props = { "cache-line-size",
				     "cache-block-size", },
		.nr_sets_prop    = "cache-sets",
	}, {
		.size_prop       = "i-cache-size",
		.line_size_props = { "i-cache-line-size",
				     "i-cache-block-size", },
		.nr_sets_prop    = "i-cache-sets",
	}, {
		.size_prop       = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop    = "d-cache-sets",
	},
};

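/*
 * Illustrative (made-up) DT node carrying the properties queried via the
 * table above, here for a unified level 2 cache:
 *
 *	l2: l2-cache {
 *		compatible = "cache";
 *		cache-level = <2>;
 *		cache-unified;
 *		cache-size = <0x80000>;
 *		cache-line-size = <64>;
 *		cache-sets = <2048>;
 *	};
 */
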
static inline int get_cacheinfo_idx(enum cache_type type)
{
	if (type == CACHE_TYPE_UNIFIED)
		return 0;
	return type;
}

static void cache_size(struct cacheinfo *this_leaf, struct device_node *np)
{
	const char *propname;
	int ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	propname = cache_type_info[ct_idx].size_prop;

	of_property_read_u32(np, propname, &this_leaf->size);
}

/* not cache_line_size() because that's a macro in include/linux/cache.h */
static void cache_get_line_size(struct cacheinfo *this_leaf,
				struct device_node *np)
{
	int i, lim, ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	lim = ARRAY_SIZE(cache_type_info[ct_idx].line_size_props);

	for (i = 0; i < lim; i++) {
		int ret;
		u32 line_size;
		const char *propname;

		propname = cache_type_info[ct_idx].line_size_props[i];
		ret = of_property_read_u32(np, propname, &line_size);
		if (!ret) {
			this_leaf->coherency_line_size = line_size;
			break;
		}
	}
}

static void cache_nr_sets(struct cacheinfo *this_leaf, struct device_node *np)
{
	const char *propname;
	int ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	propname = cache_type_info[ct_idx].nr_sets_prop;

	of_property_read_u32(np, propname, &this_leaf->number_of_sets);
}

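/*
 * Derive the number of ways from size, sets and line size:
 * ways = size / (nr_sets * line_size). A worked example with the values
 * from the node sketched above: (0x80000 / 2048) / 64 = 4 ways.
 */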
static void cache_associativity(struct cacheinfo *this_leaf)
{
	unsigned int line_size = this_leaf->coherency_line_size;
	unsigned int nr_sets = this_leaf->number_of_sets;
	unsigned int size = this_leaf->size;

	/*
	 * If the cache is fully associative, there is no need to
	 * check the other properties.
	 */
	if (!(nr_sets == 1) && (nr_sets > 0 && size > 0 && line_size > 0))
		this_leaf->ways_of_associativity = (size / nr_sets) / line_size;
}

static bool cache_node_is_unified(struct cacheinfo *this_leaf,
				  struct device_node *np)
{
	return of_property_read_bool(np, "cache-unified");
}

static void cache_of_set_props(struct cacheinfo *this_leaf,
			       struct device_node *np)
{
	/*
	 * init_cache_level must set up the cache level correctly,
	 * overriding the architecturally specified levels, so
	 * if type is NONE at this stage, it should be unified.
	 */
	if (this_leaf->type == CACHE_TYPE_NOCACHE &&
	    cache_node_is_unified(this_leaf, np))
		this_leaf->type = CACHE_TYPE_UNIFIED;
	cache_size(this_leaf, np);
	cache_get_line_size(this_leaf, np);
	cache_nr_sets(this_leaf, np);
	cache_associativity(this_leaf);
}

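/*
 * Walk the cache hierarchy for @cpu: level 1 leaves take their properties
 * from the CPU node itself, while higher levels follow the firmware's
 * next-level-cache chain via of_find_next_cache_node().
 */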
static int cache_setup_of_node(unsigned int cpu)
{
	struct device_node *np, *prev;
	struct cacheinfo *this_leaf;
	unsigned int index = 0;

	np = of_cpu_device_node_get(cpu);
	if (!np) {
		pr_err("Failed to find cpu%d device node\n", cpu);
		return -ENOENT;
	}

	if (!of_check_cache_nodes(np)) {
		of_node_put(np);
		return -ENOENT;
	}

	prev = np;

	while (index < cache_leaves(cpu)) {
		this_leaf = per_cpu_cacheinfo_idx(cpu, index);
		if (this_leaf->level != 1) {
			np = of_find_next_cache_node(np);
			of_node_put(prev);
			prev = np;
			if (!np)
				break;
		}
		cache_of_set_props(this_leaf, np);
		this_leaf->fw_token = np;
		index++;
	}

	of_node_put(np);

	if (index != cache_leaves(cpu)) /* not all OF nodes populated */
		return -ENOENT;

	return 0;
}

static bool of_check_cache_nodes(struct device_node *np)
{
	struct device_node *next;

	if (of_property_present(np, "cache-size")   ||
	    of_property_present(np, "i-cache-size") ||
	    of_property_present(np, "d-cache-size") ||
	    of_property_present(np, "cache-unified"))
		return true;

	next = of_find_next_cache_node(np);
	if (next) {
		of_node_put(next);
		return true;
	}

	return false;
}

static int of_count_cache_leaves(struct device_node *np)
{
	unsigned int leaves = 0;

	if (of_property_read_bool(np, "cache-size"))
		++leaves;
	if (of_property_read_bool(np, "i-cache-size"))
		++leaves;
	if (of_property_read_bool(np, "d-cache-size"))
		++leaves;

	if (!leaves) {
		/* The '[i-|d-|]cache-size' property is required, but
		 * if absent, fall back on the 'cache-unified' property.
		 */
		if (of_property_read_bool(np, "cache-unified"))
			++leaves;
		else
			leaves += 2;
	}

	return leaves;
}

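/*
 * Count levels and leaves for @cpu by walking the next-level-cache chain
 * from the CPU node; every node on the chain must be "cache" compatible
 * and carry a strictly increasing "cache-level".
 */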
int init_of_cache_level(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct device_node *np = of_cpu_device_node_get(cpu);
	struct device_node *prev = NULL;
	unsigned int levels = 0, leaves, level;

	if (!of_check_cache_nodes(np)) {
		of_node_put(np);
		return -ENOENT;
	}

	leaves = of_count_cache_leaves(np);
	if (leaves > 0)
		levels = 1;

	prev = np;
	while ((np = of_find_next_cache_node(np))) {
		of_node_put(prev);
		prev = np;
		if (!of_device_is_compatible(np, "cache"))
			goto err_out;
		if (of_property_read_u32(np, "cache-level", &level))
			goto err_out;
		if (level <= levels)
			goto err_out;

		leaves += of_count_cache_leaves(np);
		levels = level;
	}

	of_node_put(np);
	this_cpu_ci->num_levels = levels;
	this_cpu_ci->num_leaves = leaves;

	return 0;

err_out:
	of_node_put(np);
	return -EINVAL;
}

#else
static inline int cache_setup_of_node(unsigned int cpu) { return 0; }
int init_of_cache_level(unsigned int cpu) { return 0; }
#endif

int __weak cache_setup_acpi(unsigned int cpu)
{
	return -ENOTSUPP;
}

unsigned int coherency_max_size;

static int cache_setup_properties(unsigned int cpu)
{
	int ret = 0;

	if (of_have_populated_dt())
		ret = cache_setup_of_node(cpu);
	else if (!acpi_disabled)
		ret = cache_setup_acpi(cpu);

	// Assume there is no cache information available in DT/ACPI from now on.
	if (ret && use_arch_cache_info())
		use_arch_info = true;

	return ret;
}

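/*
 * Populate this CPU's shared_cpu_map (and its siblings') by comparing every
 * local leaf against the leaves of every other online CPU; also tracks the
 * largest cache line size seen in coherency_max_size.
 */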
static int cache_shared_cpu_map_setup(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sib_leaf;
	unsigned int index, sib_index;
	int ret = 0;

	if (this_cpu_ci->cpu_map_populated)
		return 0;

	/*
	 * Skip setting up cache properties if the LLC is valid; we just need
	 * to update the shared cpu_map if the cache attributes were
	 * populated early, before all the CPUs were brought online.
	 */
	if (!last_level_cache_is_valid(cpu) && !use_arch_info) {
		ret = cache_setup_properties(cpu);
		if (ret)
			return ret;
	}

	for (index = 0; index < cache_leaves(cpu); index++) {
		unsigned int i;

		this_leaf = per_cpu_cacheinfo_idx(cpu, index);

		cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
		for_each_online_cpu(i) {
			struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);

			if (i == cpu || !sib_cpu_ci->info_list)
				continue; /* skip if itself or no cacheinfo */

			for (sib_index = 0; sib_index < cache_leaves(i); sib_index++) {
				sib_leaf = per_cpu_cacheinfo_idx(i, sib_index);
				if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
					cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
					cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
					break;
				}
			}
		}
		/* record the maximum cache line size */
		if (this_leaf->coherency_line_size > coherency_max_size)
			coherency_max_size = this_leaf->coherency_line_size;
	}

	return 0;
}

static void cache_shared_cpu_map_remove(unsigned int cpu)
{
	struct cacheinfo *this_leaf, *sib_leaf;
	unsigned int sibling, index, sib_index;

	for (index = 0; index < cache_leaves(cpu); index++) {
		this_leaf = per_cpu_cacheinfo_idx(cpu, index);
		for_each_cpu(sibling, &this_leaf->shared_cpu_map) {
			struct cpu_cacheinfo *sib_cpu_ci =
						get_cpu_cacheinfo(sibling);

			if (sibling == cpu || !sib_cpu_ci->info_list)
				continue; /* skip if itself or no cacheinfo */

			for (sib_index = 0; sib_index < cache_leaves(sibling); sib_index++) {
				sib_leaf = per_cpu_cacheinfo_idx(sibling, sib_index);
				if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
					cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
					cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
					break;
				}
			}
		}
	}
}

static void free_cache_attributes(unsigned int cpu)
{
	if (!per_cpu_cacheinfo(cpu))
		return;

	cache_shared_cpu_map_remove(cpu);
}

int __weak early_cache_level(unsigned int cpu)
{
	return -ENOENT;
}

int __weak init_cache_level(unsigned int cpu)
{
	return -ENOENT;
}

int __weak populate_cache_leaves(unsigned int cpu)
{
	return -ENOENT;
}

static inline int allocate_cache_info(int cpu)
{
	per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
					 sizeof(struct cacheinfo), GFP_ATOMIC);
	if (!per_cpu_cacheinfo(cpu)) {
		cache_leaves(cpu) = 0;
		return -ENOMEM;
	}

	return 0;
}

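/*
 * Early (boot time) detection path: size the hierarchy from DT or the ACPI
 * PPTT, where each unified level contributes one leaf and each split level
 * two (num_leaves = levels + split_levels), then allocate the info_list.
 */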
int fetch_cache_info(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	unsigned int levels = 0, split_levels = 0;
	int ret;

	if (acpi_disabled) {
		ret = init_of_cache_level(cpu);
	} else {
		ret = acpi_get_cache_info(cpu, &levels, &split_levels);
		if (!ret) {
			this_cpu_ci->num_levels = levels;
			/*
			 * This assumes that:
			 * - there cannot be any split caches (data/instruction)
			 *   above a unified cache
			 * - data/instruction caches come by pair
			 */
			this_cpu_ci->num_leaves = levels + split_levels;
		}
	}

	if (ret || !cache_leaves(cpu)) {
		ret = early_cache_level(cpu);
		if (ret)
			return ret;

		if (!cache_leaves(cpu))
			return -ENOENT;

		this_cpu_ci->early_ci_levels = true;
	}

	return allocate_cache_info(cpu);
}

static inline int init_level_allocate_ci(unsigned int cpu)
{
	unsigned int early_leaves = cache_leaves(cpu);

	/* Since early initialization/allocation of the cacheinfo is allowed
	 * via fetch_cache_info() and this also gets called as CPU hotplug
	 * callbacks via cacheinfo_cpu_online, the init/alloc can be skipped
	 * as it will happen only once (the cacheinfo memory is never freed).
	 * Just populate the cacheinfo. However, if the cacheinfo has been
	 * allocated early through the arch-specific early_cache_level() call,
	 * there is a chance the info is wrong (this can happen on arm64). In
	 * that case, call init_cache_level() anyway to give the arch-specific
	 * code a chance to make things right.
	 */
	if (per_cpu_cacheinfo(cpu) && !ci_cacheinfo(cpu)->early_ci_levels)
		return 0;

	if (init_cache_level(cpu) || !cache_leaves(cpu))
		return -ENOENT;

	/*
	 * Now that we have properly initialized the cache level info, make
	 * sure we don't try to do that again the next time we are called
	 * (e.g. as CPU hotplug callbacks).
	 */
	ci_cacheinfo(cpu)->early_ci_levels = false;

	if (cache_leaves(cpu) <= early_leaves)
		return 0;

	kfree(per_cpu_cacheinfo(cpu));
	return allocate_cache_info(cpu);
}

int detect_cache_attributes(unsigned int cpu)
{
	int ret;

	ret = init_level_allocate_ci(cpu);
	if (ret)
		return ret;

	/*
	 * populate_cache_leaves() may completely set up the cache leaves and
	 * shared_cpu_map, or it may leave them partially set up.
	 */
	ret = populate_cache_leaves(cpu);
	if (ret)
		goto free_ci;

	/*
	 * For systems using DT for the cache hierarchy, fw_token
	 * and shared_cpu_map will be set up here only if they are
	 * not already populated.
	 */
	ret = cache_shared_cpu_map_setup(cpu);
	if (ret) {
		pr_warn("Unable to detect cache hierarchy for CPU %d\n", cpu);
		goto free_ci;
	}

	return 0;

free_ci:
	free_cache_attributes(cpu);
	return ret;
}

/* pointer to cpuX/cache device */
static DEFINE_PER_CPU(struct device *, ci_cache_dev);
#define per_cpu_cache_dev(cpu)	(per_cpu(ci_cache_dev, cpu))

static cpumask_t cache_dev_map;

/* pointer to array of devices for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct device **, ci_index_dev);
#define per_cpu_index_dev(cpu)	(per_cpu(ci_index_dev, cpu))
#define per_cache_index_dev(cpu, idx)	((per_cpu_index_dev(cpu))[idx])

#define show_one(file_name, object)				\
static ssize_t file_name##_show(struct device *dev,		\
		struct device_attribute *attr, char *buf)	\
{								\
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);	\
	return sysfs_emit(buf, "%u\n", this_leaf->object);	\
}

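/*
 * Each invocation below generates one read-only sysfs attribute; e.g.
 * show_one(level, level) expands to level_show(), which emits the leaf's
 * level field as a decimal string.
 */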
show_one(id, id);
show_one(level, level);
show_one(coherency_line_size, coherency_line_size);
show_one(number_of_sets, number_of_sets);
show_one(physical_line_partition, physical_line_partition);
show_one(ways_of_associativity, ways_of_associativity);

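/* The size attribute is reported in KiB, e.g. a 32768-byte cache reads "32K". */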
static ssize_t size_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%uK\n", this_leaf->size >> 10);
}

static ssize_t shared_cpu_map_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;

	return sysfs_emit(buf, "%*pb\n", nr_cpu_ids, mask);
}

static ssize_t shared_cpu_list_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;

	return sysfs_emit(buf, "%*pbl\n", nr_cpu_ids, mask);
}

static ssize_t type_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const char *output;

	switch (this_leaf->type) {
	case CACHE_TYPE_DATA:
		output = "Data";
		break;
	case CACHE_TYPE_INST:
		output = "Instruction";
		break;
	case CACHE_TYPE_UNIFIED:
		output = "Unified";
		break;
	default:
		return -EINVAL;
	}

	return sysfs_emit(buf, "%s\n", output);
}

static ssize_t allocation_policy_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	unsigned int ci_attr = this_leaf->attributes;
	const char *output;

	if ((ci_attr & CACHE_READ_ALLOCATE) && (ci_attr & CACHE_WRITE_ALLOCATE))
		output = "ReadWriteAllocate";
	else if (ci_attr & CACHE_READ_ALLOCATE)
		output = "ReadAllocate";
	else if (ci_attr & CACHE_WRITE_ALLOCATE)
		output = "WriteAllocate";
	else
		return 0;

	return sysfs_emit(buf, "%s\n", output);
}

static ssize_t write_policy_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	unsigned int ci_attr = this_leaf->attributes;
	int n = 0;

	if (ci_attr & CACHE_WRITE_THROUGH)
		n = sysfs_emit(buf, "WriteThrough\n");
	else if (ci_attr & CACHE_WRITE_BACK)
		n = sysfs_emit(buf, "WriteBack\n");
	return n;
}

static DEVICE_ATTR_RO(id);
static DEVICE_ATTR_RO(level);
static DEVICE_ATTR_RO(type);
static DEVICE_ATTR_RO(coherency_line_size);
static DEVICE_ATTR_RO(ways_of_associativity);
static DEVICE_ATTR_RO(number_of_sets);
static DEVICE_ATTR_RO(size);
static DEVICE_ATTR_RO(allocation_policy);
static DEVICE_ATTR_RO(write_policy);
static DEVICE_ATTR_RO(shared_cpu_map);
static DEVICE_ATTR_RO(shared_cpu_list);
static DEVICE_ATTR_RO(physical_line_partition);

static struct attribute *cache_default_attrs[] = {
	&dev_attr_id.attr,
	&dev_attr_type.attr,
	&dev_attr_level.attr,
	&dev_attr_shared_cpu_map.attr,
	&dev_attr_shared_cpu_list.attr,
	&dev_attr_coherency_line_size.attr,
	&dev_attr_ways_of_associativity.attr,
	&dev_attr_number_of_sets.attr,
	&dev_attr_size.attr,
	&dev_attr_allocation_policy.attr,
	&dev_attr_write_policy.attr,
	&dev_attr_physical_line_partition.attr,
	NULL,
};

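/*
 * Only expose attributes whose backing fields were actually populated;
 * ways_of_associativity is special-cased on size because 0 is a valid
 * value meaning fully associative.
 */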
static umode_t
cache_default_attrs_is_visible(struct kobject *kobj,
			       struct attribute *attr, int unused)
{
	struct device *dev = kobj_to_dev(kobj);
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;
	umode_t mode = attr->mode;

	if ((attr == &dev_attr_id.attr) && (this_leaf->attributes & CACHE_ID))
		return mode;
	if ((attr == &dev_attr_type.attr) && this_leaf->type)
		return mode;
	if ((attr == &dev_attr_level.attr) && this_leaf->level)
		return mode;
	if ((attr == &dev_attr_shared_cpu_map.attr) && !cpumask_empty(mask))
		return mode;
	if ((attr == &dev_attr_shared_cpu_list.attr) && !cpumask_empty(mask))
		return mode;
	if ((attr == &dev_attr_coherency_line_size.attr) &&
	    this_leaf->coherency_line_size)
		return mode;
	if ((attr == &dev_attr_ways_of_associativity.attr) &&
	    this_leaf->size) /* allow 0 = full associativity */
		return mode;
	if ((attr == &dev_attr_number_of_sets.attr) &&
	    this_leaf->number_of_sets)
		return mode;
	if ((attr == &dev_attr_size.attr) && this_leaf->size)
		return mode;
	if ((attr == &dev_attr_write_policy.attr) &&
	    (this_leaf->attributes & CACHE_WRITE_POLICY_MASK))
		return mode;
	if ((attr == &dev_attr_allocation_policy.attr) &&
	    (this_leaf->attributes & CACHE_ALLOCATE_POLICY_MASK))
		return mode;
	if ((attr == &dev_attr_physical_line_partition.attr) &&
	    this_leaf->physical_line_partition)
		return mode;

	return 0;
}

static const struct attribute_group cache_default_group = {
	.attrs = cache_default_attrs,
	.is_visible = cache_default_attrs_is_visible,
};

static const struct attribute_group *cache_default_groups[] = {
	&cache_default_group,
	NULL,
};

static const struct attribute_group *cache_private_groups[] = {
	&cache_default_group,
	NULL, /* Place holder for private group */
	NULL,
};

const struct attribute_group *
__weak cache_get_priv_group(struct cacheinfo *this_leaf)
{
	return NULL;
}

static const struct attribute_group **
cache_get_attribute_groups(struct cacheinfo *this_leaf)
{
	const struct attribute_group *priv_group =
			cache_get_priv_group(this_leaf);

	if (!priv_group)
		return cache_default_groups;

	if (!cache_private_groups[1])
		cache_private_groups[1] = priv_group;

	return cache_private_groups;
}

/* Add/Remove cache interface for CPU device */
static void cpu_cache_sysfs_exit(unsigned int cpu)
{
	int i;
	struct device *ci_dev;

	if (per_cpu_index_dev(cpu)) {
		for (i = 0; i < cache_leaves(cpu); i++) {
			ci_dev = per_cache_index_dev(cpu, i);
			if (!ci_dev)
				continue;
			device_unregister(ci_dev);
		}
		kfree(per_cpu_index_dev(cpu));
		per_cpu_index_dev(cpu) = NULL;
	}
	device_unregister(per_cpu_cache_dev(cpu));
	per_cpu_cache_dev(cpu) = NULL;
}

static int cpu_cache_sysfs_init(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	if (per_cpu_cacheinfo(cpu) == NULL)
		return -ENOENT;

	per_cpu_cache_dev(cpu) = cpu_device_create(dev, NULL, NULL, "cache");
	if (IS_ERR(per_cpu_cache_dev(cpu)))
		return PTR_ERR(per_cpu_cache_dev(cpu));

	/* Allocate all required memory */
	per_cpu_index_dev(cpu) = kcalloc(cache_leaves(cpu),
					 sizeof(struct device *), GFP_KERNEL);
	if (unlikely(per_cpu_index_dev(cpu) == NULL))
		goto err_out;

	return 0;

err_out:
	cpu_cache_sysfs_exit(cpu);
	return -ENOMEM;
}

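/*
 * Create the cpuX/cache/indexY sysfs node for every detected leaf,
 * attaching any architecture-private attribute group on top of the
 * default one.
 */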
static int cache_add_dev(unsigned int cpu)
{
	unsigned int i;
	int rc;
	struct device *ci_dev, *parent;
	struct cacheinfo *this_leaf;
	const struct attribute_group **cache_groups;

	rc = cpu_cache_sysfs_init(cpu);
	if (unlikely(rc < 0))
		return rc;

	parent = per_cpu_cache_dev(cpu);
	for (i = 0; i < cache_leaves(cpu); i++) {
		this_leaf = per_cpu_cacheinfo_idx(cpu, i);
		if (this_leaf->disable_sysfs)
			continue;
		if (this_leaf->type == CACHE_TYPE_NOCACHE)
			break;
		cache_groups = cache_get_attribute_groups(this_leaf);
		ci_dev = cpu_device_create(parent, this_leaf, cache_groups,
					   "index%1u", i);
		if (IS_ERR(ci_dev)) {
			rc = PTR_ERR(ci_dev);
			goto err;
		}
		per_cache_index_dev(cpu, i) = ci_dev;
	}
	cpumask_set_cpu(cpu, &cache_dev_map);

	return 0;
err:
	cpu_cache_sysfs_exit(cpu);
	return rc;
}

static int cacheinfo_cpu_online(unsigned int cpu)
{
	int rc = detect_cache_attributes(cpu);

	if (rc)
		return rc;
	rc = cache_add_dev(cpu);
	if (rc)
		free_cache_attributes(cpu);
	return rc;
}

static int cacheinfo_cpu_pre_down(unsigned int cpu)
{
	if (cpumask_test_and_clear_cpu(cpu, &cache_dev_map))
		cpu_cache_sysfs_exit(cpu);

	free_cache_attributes(cpu);
	return 0;
}

static int __init cacheinfo_sysfs_init(void)
{
	return cpuhp_setup_state(CPUHP_AP_BASE_CACHEINFO_ONLINE,
				 "base/cacheinfo:online",
				 cacheinfo_cpu_online, cacheinfo_cpu_pre_down);
}
device_initcall(cacheinfo_sysfs_init);