/*
 * Processor cache information made available to userspace via sysfs;
 * intended to be compatible with x86 intel_cacheinfo implementation.
 *
 * Copyright 2008 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 */
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <asm/prom.h>

#include "cacheinfo.h"
/* per-cpu object for tracking:
 * - a "cache" kobject for the top-level directory
 * - a list of "index" objects representing the cpu's local cache hierarchy
 */
struct cache_dir {
	struct kobject *kobj; /* bare (not embedded) kobject for cache
			       * directory */
	struct cache_index_dir *index; /* list of index objects */
};
/* "index" object: each cpu's cache directory has an index
 * subdirectory corresponding to a cache object associated with the
 * cpu. This object's lifetime is managed via the embedded kobject.
 */
struct cache_index_dir {
	struct kobject kobj;
	struct cache_index_dir *next; /* next index in parent directory */
	struct cache *cache;
};
/* Template for determining which OF properties to query for a given
 * cache type */
struct cache_type_info {
	const char *name;
	const char *size_prop;

	/* Allow for both [di]-cache-line-size and
	 * [di]-cache-block-size properties. According to the PowerPC
	 * Processor binding, -line-size should be provided if it
	 * differs from the cache block size (that which is operated
	 * on by cache instructions), so we look for -line-size first.
	 * See cache_get_line_size(). */

	const char *line_size_props[2];
	const char *nr_sets_prop;
};
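/* Illustrative device tree fragment showing the properties consulted
 * above. The node name and values here are hypothetical, chosen for
 * the sake of example rather than taken from any real machine:
 *
 *	L2: l2-cache {
 *		compatible = "cache";
 *		cache-unified;
 *		d-cache-size = <0x80000>;	// 512K
 *		d-cache-block-size = <0x80>;	// 128-byte blocks
 *		d-cache-sets = <0x200>;		// 512 sets
 *	};
 *
 * A split (Harvard) cache would instead carry separate i-cache-* and
 * d-cache-* properties and omit "cache-unified".
 */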
/* These are used to index the cache_type_info array. */
#define CACHE_TYPE_UNIFIED     0
#define CACHE_TYPE_INSTRUCTION 1
#define CACHE_TYPE_DATA        2
static const struct cache_type_info cache_type_info[] = {
	{
		/* PowerPC Processor binding says the [di]-cache-*
		 * must be equal on unified caches, so just use
		 * d-cache properties. */
		.name = "Unified",
		.size_prop = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop = "d-cache-sets",
	},
	{
		.name = "Instruction",
		.size_prop = "i-cache-size",
		.line_size_props = { "i-cache-line-size",
				     "i-cache-block-size", },
		.nr_sets_prop = "i-cache-sets",
	},
	{
		.name = "Data",
		.size_prop = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop = "d-cache-sets",
	},
};
/* Cache object: each instance of this corresponds to a distinct cache
 * in the system. There are separate objects for Harvard caches: one
 * each for instruction and data, and each refers to the same OF node.
 * The refcount of the OF node is elevated for the lifetime of the
 * cache object. A cache object is released when its shared_cpu_map
 * is cleared (see cache_cpu_clear).
 *
 * A cache object is on two lists: an unsorted global list
 * (cache_list) of cache objects; and a singly-linked list
 * representing the local cache hierarchy, which is ordered by level
 * (e.g. L1d -> L1i -> L2 -> L3).
 */
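/* For example (hypothetical configuration), a CPU with split L1 and
 * unified L2 yields three cache objects chained through next_local:
 *
 *	L1d->next_local == L1i;
 *	L1i->next_local == L2;
 *	L2->next_local  == NULL;
 *
 * while all three also sit on the global cache_list.
 */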
struct cache {
	struct device_node *ofnode;    /* OF node for this cache, may be cpu */
	struct cpumask shared_cpu_map; /* online CPUs using this cache */
	int type;                      /* split cache disambiguation */
	int level;                     /* level not explicit in device tree */
	struct list_head list;         /* global list of cache objects */
	struct cache *next_local;      /* next cache of >= level */
};
static DEFINE_PER_CPU(struct cache_dir *, cache_dir_pcpu);
/* traversal/modification of this list occurs only at cpu hotplug time;
 * access is serialized by cpu hotplug locking
 */
static LIST_HEAD(cache_list);
static struct cache_index_dir *kobj_to_cache_index_dir(struct kobject *k)
{
	return container_of(k, struct cache_index_dir, kobj);
}
static const char *cache_type_string(const struct cache *cache)
{
	return cache_type_info[cache->type].name;
}
static void cache_init(struct cache *cache, int type, int level,
		       struct device_node *ofnode)
{
	cache->type = type;
	cache->level = level;
	cache->ofnode = of_node_get(ofnode);
	INIT_LIST_HEAD(&cache->list);
	list_add(&cache->list, &cache_list);
}
static struct cache *new_cache(int type, int level, struct device_node *ofnode)
{
	struct cache *cache;

	cache = kzalloc(sizeof(*cache), GFP_KERNEL);
	if (cache)
		cache_init(cache, type, level, ofnode);

	return cache;
}
static void release_cache_debugcheck(struct cache *cache)
{
	struct cache *iter;

	list_for_each_entry(iter, &cache_list, list)
		WARN_ONCE(iter->next_local == cache,
			  "cache for %s(%s) refers to cache for %s(%s)\n",
			  iter->ofnode->full_name,
			  cache_type_string(iter),
			  cache->ofnode->full_name,
			  cache_type_string(cache));
}
static void release_cache(struct cache *cache)
{
	if (!cache)
		return;

	pr_debug("freeing L%d %s cache for %s\n", cache->level,
		 cache_type_string(cache), cache->ofnode->full_name);

	release_cache_debugcheck(cache);
	list_del(&cache->list);
	of_node_put(cache->ofnode);
	kfree(cache);
}
static void cache_cpu_set(struct cache *cache, int cpu)
{
	struct cache *next = cache;

	while (next) {
		WARN_ONCE(cpumask_test_cpu(cpu, &next->shared_cpu_map),
			  "CPU %i already accounted in %s(%s)\n",
			  cpu, next->ofnode->full_name,
			  cache_type_string(next));
		cpumask_set_cpu(cpu, &next->shared_cpu_map);
		next = next->next_local;
	}
}
static int cache_size(const struct cache *cache, unsigned int *ret)
{
	const char *propname;
	const __be32 *cache_size;

	propname = cache_type_info[cache->type].size_prop;

	cache_size = of_get_property(cache->ofnode, propname, NULL);
	if (!cache_size)
		return -ENODEV;

	*ret = of_read_number(cache_size, 1);
	return 0;
}
static int cache_size_kb(const struct cache *cache, unsigned int *ret)
{
	unsigned int size;

	if (cache_size(cache, &size))
		return -ENODEV;
	*ret = size / 1024;
	return 0;
}
/* not cache_line_size() because that's a macro in include/linux/cache.h */
static int cache_get_line_size(const struct cache *cache, unsigned int *ret)
{
	const __be32 *line_size;
	int i, lim;

	lim = ARRAY_SIZE(cache_type_info[cache->type].line_size_props);

	for (i = 0; i < lim; i++) {
		const char *propname;

		propname = cache_type_info[cache->type].line_size_props[i];
		line_size = of_get_property(cache->ofnode, propname, NULL);
		if (line_size)
			break;
	}

	if (!line_size)
		return -ENODEV;

	*ret = of_read_number(line_size, 1);
	return 0;
}
static int cache_nr_sets(const struct cache *cache, unsigned int *ret)
{
	const char *propname;
	const __be32 *nr_sets;

	propname = cache_type_info[cache->type].nr_sets_prop;

	nr_sets = of_get_property(cache->ofnode, propname, NULL);
	if (!nr_sets)
		return -ENODEV;

	*ret = of_read_number(nr_sets, 1);
	return 0;
}
static int cache_associativity(const struct cache *cache, unsigned int *ret)
{
	unsigned int line_size;
	unsigned int nr_sets;
	unsigned int size;

	if (cache_nr_sets(cache, &nr_sets))
		goto err;

	/* If the cache is fully associative, there is no need to
	 * check the other properties.
	 */
	if (nr_sets == 1) {
		*ret = 0;
		return 0;
	}

	if (cache_get_line_size(cache, &line_size))
		goto err;
	if (cache_size(cache, &size))
		goto err;

	if (!(nr_sets > 0 && size > 0 && line_size > 0))
		goto err;

	*ret = (size / nr_sets) / line_size;
	return 0;
err:
	return -ENODEV;
}
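/* Worked example with made-up numbers: a 512K cache (size = 524288)
 * with 128-byte lines and 512 sets gives
 *
 *	(524288 / 512) / 128 = 8
 *
 * i.e. the cache is reported as 8-way set-associative.
 */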
/* helper for dealing with split caches */
static struct cache *cache_find_first_sibling(struct cache *cache)
{
	struct cache *iter;

	if (cache->type == CACHE_TYPE_UNIFIED)
		return cache;

	list_for_each_entry(iter, &cache_list, list)
		if (iter->ofnode == cache->ofnode && iter->next_local == cache)
			return iter;

	return cache;
}
/* return the first cache on a local list matching node */
static struct cache *cache_lookup_by_node(const struct device_node *node)
{
	struct cache *cache = NULL;
	struct cache *iter;

	list_for_each_entry(iter, &cache_list, list) {
		if (iter->ofnode != node)
			continue;
		cache = cache_find_first_sibling(iter);
		break;
	}

	return cache;
}
static bool cache_node_is_unified(const struct device_node *np)
{
	return of_get_property(np, "cache-unified", NULL);
}
static struct cache *cache_do_one_devnode_unified(struct device_node *node,
						  int level)
{
	struct cache *cache;

	pr_debug("creating L%d ucache for %s\n", level, node->full_name);

	cache = new_cache(CACHE_TYPE_UNIFIED, level, node);

	return cache;
}
static struct cache *cache_do_one_devnode_split(struct device_node *node,
						int level)
{
	struct cache *dcache, *icache;

	pr_debug("creating L%d dcache and icache for %s\n", level,
		 node->full_name);

	dcache = new_cache(CACHE_TYPE_DATA, level, node);
	icache = new_cache(CACHE_TYPE_INSTRUCTION, level, node);

	if (!dcache || !icache)
		goto err;

	dcache->next_local = icache;

	return dcache;
err:
	release_cache(dcache);
	release_cache(icache);
	return NULL;
}
static struct cache *cache_do_one_devnode(struct device_node *node, int level)
{
	struct cache *cache;

	if (cache_node_is_unified(node))
		cache = cache_do_one_devnode_unified(node, level);
	else
		cache = cache_do_one_devnode_split(node, level);

	return cache;
}
static struct cache *cache_lookup_or_instantiate(struct device_node *node,
						 int level)
{
	struct cache *cache;

	cache = cache_lookup_by_node(node);

	WARN_ONCE(cache && cache->level != level,
		  "cache level mismatch on lookup (got %d, expected %d)\n",
		  cache->level, level);

	if (!cache)
		cache = cache_do_one_devnode(node, level);

	return cache;
}
static void link_cache_lists(struct cache *smaller, struct cache *bigger)
{
	while (smaller->next_local) {
		if (smaller->next_local == bigger)
			return; /* already linked */
		smaller = smaller->next_local;
	}

	smaller->next_local = bigger;
}
static void do_subsidiary_caches_debugcheck(struct cache *cache)
{
	WARN_ON_ONCE(cache->level != 1);
	WARN_ON_ONCE(strcmp(cache->ofnode->type, "cpu"));
}
static void do_subsidiary_caches(struct cache *cache)
{
	struct device_node *subcache_node;
	int level = cache->level;

	do_subsidiary_caches_debugcheck(cache);

	while ((subcache_node = of_find_next_cache_node(cache->ofnode))) {
		struct cache *subcache;

		level++;
		subcache = cache_lookup_or_instantiate(subcache_node, level);
		of_node_put(subcache_node);
		if (!subcache)
			break;

		link_cache_lists(cache, subcache);
		cache = subcache;
	}
}
static struct cache *cache_chain_instantiate(unsigned int cpu_id)
{
	struct device_node *cpu_node;
	struct cache *cpu_cache = NULL;

	pr_debug("creating cache object(s) for CPU %i\n", cpu_id);

	cpu_node = of_get_cpu_node(cpu_id, NULL);
	WARN_ONCE(!cpu_node, "no OF node found for CPU %i\n", cpu_id);
	if (!cpu_node)
		goto out;

	cpu_cache = cache_lookup_or_instantiate(cpu_node, 1);
	if (!cpu_cache)
		goto out;

	do_subsidiary_caches(cpu_cache);

	cache_cpu_set(cpu_cache, cpu_id);
out:
	of_node_put(cpu_node);

	return cpu_cache;
}
static struct cache_dir *cacheinfo_create_cache_dir(unsigned int cpu_id)
{
	struct cache_dir *cache_dir;
	struct device *dev;
	struct kobject *kobj = NULL;

	dev = get_cpu_device(cpu_id);
	WARN_ONCE(!dev, "no dev for CPU %i\n", cpu_id);
	if (!dev)
		goto err;
	kobj = kobject_create_and_add("cache", &dev->kobj);
	if (!kobj)
		goto err;
	cache_dir = kzalloc(sizeof(*cache_dir), GFP_KERNEL);
	if (!cache_dir)
		goto err;

	cache_dir->kobj = kobj;

	WARN_ON_ONCE(per_cpu(cache_dir_pcpu, cpu_id) != NULL);

	per_cpu(cache_dir_pcpu, cpu_id) = cache_dir;

	return cache_dir;
err:
	kobject_put(kobj);
	return NULL;
}
static void cache_index_release(struct kobject *kobj)
{
	struct cache_index_dir *index;

	index = kobj_to_cache_index_dir(kobj);

	pr_debug("freeing index directory for L%d %s cache\n",
		 index->cache->level, cache_type_string(index->cache));

	kfree(index);
}
static ssize_t cache_index_show(struct kobject *k, struct attribute *attr, char *buf)
{
	struct kobj_attribute *kobj_attr;

	kobj_attr = container_of(attr, struct kobj_attribute, attr);

	return kobj_attr->show(k, kobj_attr, buf);
}
static struct cache *index_kobj_to_cache(struct kobject *k)
{
	struct cache_index_dir *index;

	index = kobj_to_cache_index_dir(k);

	return index->cache;
}
static ssize_t size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int size_kb;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_size_kb(cache, &size_kb))
		return -ENODEV;

	return sprintf(buf, "%uK\n", size_kb);
}

static struct kobj_attribute cache_size_attr =
	__ATTR(size, 0444, size_show, NULL);
static ssize_t line_size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int line_size;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_get_line_size(cache, &line_size))
		return -ENODEV;

	return sprintf(buf, "%u\n", line_size);
}

static struct kobj_attribute cache_line_size_attr =
	__ATTR(coherency_line_size, 0444, line_size_show, NULL);
static ssize_t nr_sets_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int nr_sets;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_nr_sets(cache, &nr_sets))
		return -ENODEV;

	return sprintf(buf, "%u\n", nr_sets);
}

static struct kobj_attribute cache_nr_sets_attr =
	__ATTR(number_of_sets, 0444, nr_sets_show, NULL);
static ssize_t associativity_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int associativity;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_associativity(cache, &associativity))
		return -ENODEV;

	return sprintf(buf, "%u\n", associativity);
}

static struct kobj_attribute cache_assoc_attr =
	__ATTR(ways_of_associativity, 0444, associativity_show, NULL);
static ssize_t type_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	return sprintf(buf, "%s\n", cache_type_string(cache));
}

static struct kobj_attribute cache_type_attr =
	__ATTR(type, 0444, type_show, NULL);
static ssize_t level_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	struct cache_index_dir *index;
	struct cache *cache;

	index = kobj_to_cache_index_dir(k);
	cache = index->cache;

	return sprintf(buf, "%d\n", cache->level);
}

static struct kobj_attribute cache_level_attr =
	__ATTR(level, 0444, level_show, NULL);
static ssize_t shared_cpu_map_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	struct cache_index_dir *index;
	struct cache *cache;
	int n;

	index = kobj_to_cache_index_dir(k);
	cache = index->cache;

	/* leave room for the trailing newline and NUL */
	n = cpumask_scnprintf(buf, PAGE_SIZE - 2, &cache->shared_cpu_map);
	buf[n++] = '\n';
	buf[n] = '\0';
	return n;
}

static struct kobj_attribute cache_shared_cpu_map_attr =
	__ATTR(shared_cpu_map, 0444, shared_cpu_map_show, NULL);
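/* Example (hypothetical): for a cache shared by CPUs 0 and 1, reading
 * shared_cpu_map would yield a hex bitmask such as "00000003" (the
 * exact grouping is determined by cpumask_scnprintf and NR_CPUS).
 */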
/* Attributes which should always be created -- the kobject/sysfs core
 * does this automatically via kobj_type->default_attrs. This is the
 * minimum data required to uniquely identify a cache.
 */
static struct attribute *cache_index_default_attrs[] = {
	&cache_type_attr.attr,
	&cache_level_attr.attr,
	&cache_shared_cpu_map_attr.attr,
	NULL,
};
/* Attributes which should be created if the cache device node has the
 * right properties -- see cacheinfo_create_index_opt_attrs
 */
static struct kobj_attribute *cache_index_opt_attrs[] = {
	&cache_size_attr,
	&cache_line_size_attr,
	&cache_nr_sets_attr,
	&cache_assoc_attr,
};
static const struct sysfs_ops cache_index_ops = {
	.show = cache_index_show,
};
static struct kobj_type cache_index_type = {
	.release = cache_index_release,
	.sysfs_ops = &cache_index_ops,
	.default_attrs = cache_index_default_attrs,
};
static void cacheinfo_create_index_opt_attrs(struct cache_index_dir *dir)
{
	const char *cache_name;
	const char *cache_type;
	struct cache *cache;
	char *buf;
	int i;

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return;

	cache = dir->cache;
	cache_name = cache->ofnode->full_name;
	cache_type = cache_type_string(cache);

	/* We don't want to create an attribute that can't provide a
	 * meaningful value. Check the return value of each optional
	 * attribute's ->show method before registering the
	 * attribute.
	 */
	for (i = 0; i < ARRAY_SIZE(cache_index_opt_attrs); i++) {
		struct kobj_attribute *attr;
		ssize_t rc;

		attr = cache_index_opt_attrs[i];
		rc = attr->show(&dir->kobj, attr, buf);
		if (rc <= 0) {
			pr_debug("not creating %s attribute for %s(%s) (rc = %zd)\n",
				 attr->attr.name, cache_name, cache_type, rc);
			continue;
		}
		if (sysfs_create_file(&dir->kobj, &attr->attr))
			pr_debug("could not create %s attribute for %s(%s)\n",
				 attr->attr.name, cache_name, cache_type);
	}

	kfree(buf);
}
static void cacheinfo_create_index_dir(struct cache *cache, int index,
				       struct cache_dir *cache_dir)
{
	struct cache_index_dir *index_dir;
	int rc;

	index_dir = kzalloc(sizeof(*index_dir), GFP_KERNEL);
	if (!index_dir)
		goto err;

	index_dir->cache = cache;

	rc = kobject_init_and_add(&index_dir->kobj, &cache_index_type,
				  cache_dir->kobj, "index%d", index);
	if (rc)
		goto err;

	index_dir->next = cache_dir->index;
	cache_dir->index = index_dir;

	cacheinfo_create_index_opt_attrs(index_dir);

	return;
err:
	kfree(index_dir);
}
static void cacheinfo_sysfs_populate(unsigned int cpu_id,
				     struct cache *cache_list)
{
	struct cache_dir *cache_dir;
	struct cache *cache;
	int index = 0;

	cache_dir = cacheinfo_create_cache_dir(cpu_id);
	if (!cache_dir)
		return;

	cache = cache_list;
	while (cache) {
		cacheinfo_create_index_dir(cache, index, cache_dir);
		index++;
		cache = cache->next_local;
	}
}
void cacheinfo_cpu_online(unsigned int cpu_id)
{
	struct cache *cache;

	cache = cache_chain_instantiate(cpu_id);
	if (!cache)
		return;

	cacheinfo_sysfs_populate(cpu_id, cache);
}
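/* Illustrative (hypothetical) result for a CPU with split L1 and
 * unified L2 -- the exact set of index directories depends on the
 * device tree:
 *
 *	/sys/devices/system/cpu/cpu0/cache/index0   (L1 Data)
 *	/sys/devices/system/cpu/cpu0/cache/index1   (L1 Instruction)
 *	/sys/devices/system/cpu/cpu0/cache/index2   (L2 Unified)
 *
 * Each index directory always contains type, level and
 * shared_cpu_map; the optional size, coherency_line_size,
 * number_of_sets and ways_of_associativity files appear only when the
 * corresponding OF properties are present (see
 * cacheinfo_create_index_opt_attrs).
 */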
#ifdef CONFIG_HOTPLUG_CPU /* functions needed for cpu offline */
static struct cache *cache_lookup_by_cpu(unsigned int cpu_id)
{
	struct device_node *cpu_node;
	struct cache *cache = NULL;

	cpu_node = of_get_cpu_node(cpu_id, NULL);
	WARN_ONCE(!cpu_node, "no OF node found for CPU %i\n", cpu_id);
	if (!cpu_node)
		return NULL;

	cache = cache_lookup_by_node(cpu_node);
	of_node_put(cpu_node);

	return cache;
}
static void remove_index_dirs(struct cache_dir *cache_dir)
{
	struct cache_index_dir *index;

	index = cache_dir->index;

	while (index) {
		struct cache_index_dir *next;

		next = index->next;
		kobject_put(&index->kobj);
		index = next;
	}
}
static void remove_cache_dir(struct cache_dir *cache_dir)
{
	remove_index_dirs(cache_dir);

	kobject_put(cache_dir->kobj);

	kfree(cache_dir);
}
static void cache_cpu_clear(struct cache *cache, int cpu)
{
	while (cache) {
		struct cache *next = cache->next_local;

		WARN_ONCE(!cpumask_test_cpu(cpu, &cache->shared_cpu_map),
			  "CPU %i not accounted in %s(%s)\n",
			  cpu, cache->ofnode->full_name,
			  cache_type_string(cache));

		cpumask_clear_cpu(cpu, &cache->shared_cpu_map);

		/* Release the cache object if all the cpus using it
		 * are offline */
		if (cpumask_empty(&cache->shared_cpu_map))
			release_cache(cache);

		cache = next;
	}
}
void cacheinfo_cpu_offline(unsigned int cpu_id)
{
	struct cache_dir *cache_dir;
	struct cache *cache;

	/* Prevent userspace from seeing inconsistent state - remove
	 * the sysfs hierarchy first */
	cache_dir = per_cpu(cache_dir_pcpu, cpu_id);

	/* careful, sysfs population may have failed */
	if (cache_dir) {
		remove_cache_dir(cache_dir);
		per_cpu(cache_dir_pcpu, cpu_id) = NULL;
	}

	/* clear the CPU's bit in its cache chain, possibly freeing
	 * cache objects */
	cache = cache_lookup_by_cpu(cpu_id);
	if (cache)
		cache_cpu_clear(cache, cpu_id);
}
#endif /* CONFIG_HOTPLUG_CPU */