2 * Routines to identify caches on Intel CPUs.
5 * Venkatesh Pallipadi : Adding cache identification through cpuid(4)
6 * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
7 * Andi Kleen / Andreas Herrmann : CPUID4 emulation on AMD.
10 #include <linux/init.h>
11 #include <linux/slab.h>
12 #include <linux/device.h>
13 #include <linux/compiler.h>
14 #include <linux/cpu.h>
15 #include <linux/sched.h>
16 #include <linux/pci.h>
18 #include <asm/processor.h>
19 #include <linux/smp.h>
20 #include <asm/amd_nb.h>
30 unsigned char descriptor;
35 #define MB(x) ((x) * 1024)
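/* sizes in the table below are in KB; MB(x) converts MB to the same unit */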
37 /* All the cache and trace descriptor types we care about (no TLB
38    entries) */
40 static const struct _cache_table __cpuinitconst cache_table[] =
42 { 0x06, LVL_1_INST, 8 }, /* 4-way set assoc, 32 byte line size */
43 { 0x08, LVL_1_INST, 16 }, /* 4-way set assoc, 32 byte line size */
44 { 0x09, LVL_1_INST, 32 }, /* 4-way set assoc, 64 byte line size */
45 { 0x0a, LVL_1_DATA, 8 }, /* 2 way set assoc, 32 byte line size */
46 { 0x0c, LVL_1_DATA, 16 }, /* 4-way set assoc, 32 byte line size */
47 { 0x0d, LVL_1_DATA, 16 }, /* 4-way set assoc, 64 byte line size */
48 { 0x0e, LVL_1_DATA, 24 }, /* 6-way set assoc, 64 byte line size */
49 { 0x21, LVL_2, 256 }, /* 8-way set assoc, 64 byte line size */
50 { 0x22, LVL_3, 512 }, /* 4-way set assoc, sectored cache, 64 byte line size */
51 { 0x23, LVL_3, MB(1) }, /* 8-way set assoc, sectored cache, 64 byte line size */
52 { 0x25, LVL_3, MB(2) }, /* 8-way set assoc, sectored cache, 64 byte line size */
53 { 0x29, LVL_3, MB(4) }, /* 8-way set assoc, sectored cache, 64 byte line size */
54 { 0x2c, LVL_1_DATA, 32 }, /* 8-way set assoc, 64 byte line size */
55 { 0x30, LVL_1_INST, 32 }, /* 8-way set assoc, 64 byte line size */
56 { 0x39, LVL_2, 128 }, /* 4-way set assoc, sectored cache, 64 byte line size */
57 { 0x3a, LVL_2, 192 }, /* 6-way set assoc, sectored cache, 64 byte line size */
58 { 0x3b, LVL_2, 128 }, /* 2-way set assoc, sectored cache, 64 byte line size */
59 { 0x3c, LVL_2, 256 }, /* 4-way set assoc, sectored cache, 64 byte line size */
60 { 0x3d, LVL_2, 384 }, /* 6-way set assoc, sectored cache, 64 byte line size */
61 { 0x3e, LVL_2, 512 }, /* 4-way set assoc, sectored cache, 64 byte line size */
62 { 0x3f, LVL_2, 256 }, /* 2-way set assoc, 64 byte line size */
63 { 0x41, LVL_2, 128 }, /* 4-way set assoc, 32 byte line size */
64 { 0x42, LVL_2, 256 }, /* 4-way set assoc, 32 byte line size */
65 { 0x43, LVL_2, 512 }, /* 4-way set assoc, 32 byte line size */
66 { 0x44, LVL_2, MB(1) }, /* 4-way set assoc, 32 byte line size */
67 { 0x45, LVL_2, MB(2) }, /* 4-way set assoc, 32 byte line size */
68 { 0x46, LVL_3, MB(4) }, /* 4-way set assoc, 64 byte line size */
69 { 0x47, LVL_3, MB(8) }, /* 8-way set assoc, 64 byte line size */
70 { 0x48, LVL_2, MB(3) }, /* 12-way set assoc, 64 byte line size */
71 { 0x49, LVL_3, MB(4) }, /* 16-way set assoc, 64 byte line size */
72 { 0x4a, LVL_3, MB(6) }, /* 12-way set assoc, 64 byte line size */
73 { 0x4b, LVL_3, MB(8) }, /* 16-way set assoc, 64 byte line size */
74 { 0x4c, LVL_3, MB(12) }, /* 12-way set assoc, 64 byte line size */
75 { 0x4d, LVL_3, MB(16) }, /* 16-way set assoc, 64 byte line size */
76 { 0x4e, LVL_2, MB(6) }, /* 24-way set assoc, 64 byte line size */
77 { 0x60, LVL_1_DATA, 16 }, /* 8-way set assoc, sectored cache, 64 byte line size */
78 { 0x66, LVL_1_DATA, 8 }, /* 4-way set assoc, sectored cache, 64 byte line size */
79 { 0x67, LVL_1_DATA, 16 }, /* 4-way set assoc, sectored cache, 64 byte line size */
80 { 0x68, LVL_1_DATA, 32 }, /* 4-way set assoc, sectored cache, 64 byte line size */
81 { 0x70, LVL_TRACE, 12 }, /* 8-way set assoc */
82 { 0x71, LVL_TRACE, 16 }, /* 8-way set assoc */
83 { 0x72, LVL_TRACE, 32 }, /* 8-way set assoc */
84 { 0x73, LVL_TRACE, 64 }, /* 8-way set assoc */
85 { 0x78, LVL_2, MB(1) }, /* 4-way set assoc, 64 byte line size */
86 { 0x79, LVL_2, 128 }, /* 8-way set assoc, sectored cache, 64 byte line size */
87 { 0x7a, LVL_2, 256 }, /* 8-way set assoc, sectored cache, 64 byte line size */
88 { 0x7b, LVL_2, 512 }, /* 8-way set assoc, sectored cache, 64 byte line size */
89 { 0x7c, LVL_2, MB(1) }, /* 8-way set assoc, sectored cache, 64 byte line size */
90 { 0x7d, LVL_2, MB(2) }, /* 8-way set assoc, 64 byte line size */
91 { 0x7f, LVL_2, 512 }, /* 2-way set assoc, 64 byte line size */
92 { 0x80, LVL_2, 512 }, /* 8-way set assoc, 64 byte line size */
93 { 0x82, LVL_2, 256 }, /* 8-way set assoc, 32 byte line size */
94 { 0x83, LVL_2, 512 }, /* 8-way set assoc, 32 byte line size */
95 { 0x84, LVL_2, MB(1) }, /* 8-way set assoc, 32 byte line size */
96 { 0x85, LVL_2, MB(2) }, /* 8-way set assoc, 32 byte line size */
97 { 0x86, LVL_2, 512 }, /* 4-way set assoc, 64 byte line size */
98 { 0x87, LVL_2, MB(1) }, /* 8-way set assoc, 64 byte line size */
99 { 0xd0, LVL_3, 512 }, /* 4-way set assoc, 64 byte line size */
100 { 0xd1, LVL_3, MB(1) }, /* 4-way set assoc, 64 byte line size */
101 { 0xd2, LVL_3, MB(2) }, /* 4-way set assoc, 64 byte line size */
102 { 0xd6, LVL_3, MB(1) }, /* 8-way set assoc, 64 byte line size */
103 { 0xd7, LVL_3, MB(2) }, /* 8-way set assoc, 64 byte line size */
104 { 0xd8, LVL_3, MB(4) }, /* 12-way set assoc, 64 byte line size */
105 { 0xdc, LVL_3, MB(2) }, /* 12-way set assoc, 64 byte line size */
106 { 0xdd, LVL_3, MB(4) }, /* 12-way set assoc, 64 byte line size */
107 { 0xde, LVL_3, MB(8) }, /* 12-way set assoc, 64 byte line size */
108 { 0xe2, LVL_3, MB(2) }, /* 16-way set assoc, 64 byte line size */
109 { 0xe3, LVL_3, MB(4) }, /* 16-way set assoc, 64 byte line size */
110 { 0xe4, LVL_3, MB(8) }, /* 16-way set assoc, 64 byte line size */
111 { 0xea, LVL_3, MB(12) }, /* 24-way set assoc, 64 byte line size */
112 { 0xeb, LVL_3, MB(18) }, /* 24-way set assoc, 64 byte line size */
113 { 0xec, LVL_3, MB(24) }, /* 24-way set assoc, 64 byte line size */
122 CACHE_TYPE_UNIFIED = 3
125 union _cpuid4_leaf_eax {
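/* layout of EAX as returned by CPUID leaf 4 (and AMD leaf 0x8000001d) */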
127 enum _cache_type type:5;
128 unsigned int level:3;
129 unsigned int is_self_initializing:1;
130 unsigned int is_fully_associative:1;
131 unsigned int reserved:4;
132 unsigned int num_threads_sharing:12;
133 unsigned int num_cores_on_die:6;
138 union _cpuid4_leaf_ebx {
140 unsigned int coherency_line_size:12;
141 unsigned int physical_line_partition:10;
142 unsigned int ways_of_associativity:10;
147 union _cpuid4_leaf_ecx {
149 unsigned int number_of_sets:32;
154 struct _cpuid4_info_regs {
155 union _cpuid4_leaf_eax eax;
156 union _cpuid4_leaf_ebx ebx;
157 union _cpuid4_leaf_ecx ecx;
159 struct amd_northbridge *nb;
162 struct _cpuid4_info {
163 struct _cpuid4_info_regs base;
164 DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
167 unsigned short num_cache_leaves;
169 /* AMD doesn't have CPUID4. Emulate it here to report the same
170    information to the user. This makes some assumptions about the machine:
171    L2 not shared, no SMT etc., which is currently true on AMD CPUs.
173    In theory the TLBs could be reported as fake types (they are in "dummy"). */
177 unsigned line_size:8;
178 unsigned lines_per_tag:8;
180 unsigned size_in_kb:8;
187 unsigned line_size:8;
188 unsigned lines_per_tag:4;
190 unsigned size_in_kb:16;
197 unsigned line_size:8;
198 unsigned lines_per_tag:4;
201 unsigned size_encoded:14;
206 static const unsigned short __cpuinitconst assocs[] = {
217 [0xf] = 0xffff /* fully associative - no way to show this currently */
220 static const unsigned char __cpuinitconst levels[] = { 1, 1, 2, 3 };
221 static const unsigned char __cpuinitconst types[] = { 1, 2, 3, 3 };
223 static void __cpuinit
224 amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
225 union _cpuid4_leaf_ebx *ebx,
226 union _cpuid4_leaf_ecx *ecx)
229 unsigned line_size, lines_per_tag, assoc, size_in_kb;
230 union l1_cache l1i, l1d;
233 union l1_cache *l1 = &l1d;
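/* the legacy AMD leaves: 0x80000005 describes L1d/L1i, 0x80000006 describes L2 and L3 */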
239 cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
240 cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val);
248 assoc = assocs[l1->assoc];
249 line_size = l1->line_size;
250 lines_per_tag = l1->lines_per_tag;
251 size_in_kb = l1->size_in_kb;
256 assoc = assocs[l2.assoc];
257 line_size = l2.line_size;
258 lines_per_tag = l2.lines_per_tag;
259 /* cpu_data has errata corrections for K7 applied */
260 size_in_kb = __this_cpu_read(cpu_info.x86_cache_size);
265 assoc = assocs[l3.assoc];
266 line_size = l3.line_size;
267 lines_per_tag = l3.lines_per_tag;
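/* the L3 size reported by CPUID 0x80000006 is encoded in 512 KB units */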
268 size_in_kb = l3.size_encoded * 512;
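/* on multi-node (DCM) processors each node reports only half of this L3 */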
269 if (boot_cpu_has(X86_FEATURE_AMD_DCM)) {
270 size_in_kb = size_in_kb >> 1;
278 eax->split.is_self_initializing = 1;
279 eax->split.type = types[leaf];
280 eax->split.level = levels[leaf];
281 eax->split.num_threads_sharing = 0;
282 eax->split.num_cores_on_die = __this_cpu_read(cpu_info.x86_max_cores) - 1;
286 eax->split.is_fully_associative = 1;
287 ebx->split.coherency_line_size = line_size - 1;
288 ebx->split.ways_of_associativity = assoc - 1;
289 ebx->split.physical_line_partition = lines_per_tag - 1;
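/* number of sets = size / (line size * ways); stored minus one, as in real CPUID4 */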
290 ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
291 (ebx->split.ways_of_associativity + 1) - 1;
295 struct attribute attr;
296 ssize_t (*show)(struct _cpuid4_info *, char *, unsigned int);
297 ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count,
304 * L3 cache descriptors
306 static void __cpuinit amd_calc_l3_indices(struct amd_northbridge *nb)
308 struct amd_l3_cache *l3 = &nb->l3_cache;
309 unsigned int sc0, sc1, sc2, sc3;
312 pci_read_config_dword(nb->misc, 0x1C4, &val);
314 /* calculate subcache sizes */
315 l3->subcaches[0] = sc0 = !(val & BIT(0));
316 l3->subcaches[1] = sc1 = !(val & BIT(4));
318 if (boot_cpu_data.x86 == 0x15) {
319 l3->subcaches[0] = sc0 += !(val & BIT(1));
320 l3->subcaches[1] = sc1 += !(val & BIT(5));
323 l3->subcaches[2] = sc2 = !(val & BIT(8)) + !(val & BIT(9));
324 l3->subcaches[3] = sc3 = !(val & BIT(12)) + !(val & BIT(13));
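/* the usable index range is determined by the largest subcache (1024 indices each) */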
326 l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1;
329 static void __cpuinit amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, int index)
333 /* only for L3, and not in virtualized environments */
337 node = amd_get_nb_id(smp_processor_id());
338 this_leaf->nb = node_to_amd_nb(node);
339 if (this_leaf->nb && !this_leaf->nb->l3_cache.indices)
340 amd_calc_l3_indices(this_leaf->nb);
344 * check whether a slot used for disabling an L3 index is occupied.
345 * @nb: the northbridge containing the L3 cache
346 * @slot: slot number (0..1)
348 * @returns: the disabled index if used or a negative value if the slot is free.
350 int amd_get_l3_disable_slot(struct amd_northbridge *nb, unsigned slot)
352 unsigned int reg = 0;
354 pci_read_config_dword(nb->misc, 0x1BC + slot * 4, &reg);
356 /* check whether this slot is activated already */
357 if (reg & (3UL << 30))
363 static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
368 if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
371 index = amd_get_l3_disable_slot(this_leaf->base.nb, slot);
373 return sprintf(buf, "%d\n", index);
375 return sprintf(buf, "FREE\n");
378 #define SHOW_CACHE_DISABLE(slot) \
380 show_cache_disable_##slot(struct _cpuid4_info *this_leaf, char *buf, \
383 return show_cache_disable(this_leaf, buf, slot); \
385 SHOW_CACHE_DISABLE(0)
386 SHOW_CACHE_DISABLE(1)
388 static void amd_l3_disable_index(struct amd_northbridge *nb, int cpu,
389 unsigned slot, unsigned long idx)
396 * disable index in all 4 subcaches
398 for (i = 0; i < 4; i++) {
399 u32 reg = idx | (i << 20);
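/* bits 21:20 of the value written below select which subcache to act on */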
401 if (!nb->l3_cache.subcaches[i])
404 pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);
407 * We need to WBINVD on a core on the node containing the L3
408 * cache whose indices we disable, so a simple wbinvd()
414 pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);
419 * disable an L3 cache index by using a disable-slot
421 * @nb: the northbridge containing the L3 cache
422 * @cpu: A CPU on the node containing the L3 cache
423 * @slot: slot number (0..1)
424 * @index: index to disable
426 * @return: 0 on success, error status on failure
428 int amd_set_l3_disable_slot(struct amd_northbridge *nb, int cpu, unsigned slot,
433 /* check if @slot is already used or the index is already disabled */
434 ret = amd_get_l3_disable_slot(nb, slot);
438 if (index > nb->l3_cache.indices)
441 /* check whether the other slot has disabled the same index already */
442 if (index == amd_get_l3_disable_slot(nb, !slot))
445 amd_l3_disable_index(nb, cpu, slot, index);
450 static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
451 const char *buf, size_t count,
454 unsigned long val = 0;
457 if (!capable(CAP_SYS_ADMIN))
460 if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
463 cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
465 if (strict_strtoul(buf, 10, &val) < 0)
468 err = amd_set_l3_disable_slot(this_leaf->base.nb, cpu, slot, val);
471 pr_warning("L3 slot %d in use/index already disabled!\n",
478 #define STORE_CACHE_DISABLE(slot) \
480 store_cache_disable_##slot(struct _cpuid4_info *this_leaf, \
481 const char *buf, size_t count, \
484 return store_cache_disable(this_leaf, buf, count, slot); \
486 STORE_CACHE_DISABLE(0)
487 STORE_CACHE_DISABLE(1)
489 static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
490 show_cache_disable_0, store_cache_disable_0);
491 static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
492 show_cache_disable_1, store_cache_disable_1);
495 show_subcaches(struct _cpuid4_info *this_leaf, char *buf, unsigned int cpu)
497 if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
500 return sprintf(buf, "%x\n", amd_get_subcaches(cpu));
504 store_subcaches(struct _cpuid4_info *this_leaf, const char *buf, size_t count,
509 if (!capable(CAP_SYS_ADMIN))
512 if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
515 if (strict_strtoul(buf, 16, &val) < 0)
518 if (amd_set_subcaches(cpu, val))
524 static struct _cache_attr subcaches =
525 __ATTR(subcaches, 0644, show_subcaches, store_subcaches);
527 #else /* CONFIG_AMD_NB */
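/* without the AMD northbridge driver, L3 cache init is a no-op */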
528 #define amd_init_l3_cache(x, y)
529 #endif /* CONFIG_AMD_NB */
532 __cpuinit cpuid4_cache_lookup_regs(int index,
533 struct _cpuid4_info_regs *this_leaf)
535 union _cpuid4_leaf_eax eax;
536 union _cpuid4_leaf_ebx ebx;
537 union _cpuid4_leaf_ecx ecx;
540 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
542 cpuid_count(0x8000001d, index, &eax.full,
543 &ebx.full, &ecx.full, &edx);
545 amd_cpuid4(index, &eax, &ebx, &ecx);
546 amd_init_l3_cache(this_leaf, index);
548 cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
551 if (eax.split.type == CACHE_TYPE_NULL)
552 return -EIO; /* better error ? */
554 this_leaf->eax = eax;
555 this_leaf->ebx = ebx;
556 this_leaf->ecx = ecx;
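/* CPUID reports each of these fields minus one, hence the +1 in the size calculation */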
557 this_leaf->size = (ecx.split.number_of_sets + 1) *
558 (ebx.split.coherency_line_size + 1) *
559 (ebx.split.physical_line_partition + 1) *
560 (ebx.split.ways_of_associativity + 1);
564 static int __cpuinit find_num_cache_leaves(struct cpuinfo_x86 *c)
566 unsigned int eax, ebx, ecx, edx, op;
567 union _cpuid4_leaf_eax cache_eax;
570 if (c->x86_vendor == X86_VENDOR_AMD)
577 /* Do cpuid(op) loop to find out num_cache_leaves */
578 cpuid_count(op, i, &eax, &ebx, &ecx, &edx);
579 cache_eax.full = eax;
580 } while (cache_eax.split.type != CACHE_TYPE_NULL);
584 void __cpuinit init_amd_cacheinfo(struct cpuinfo_x86 *c)
587 if (cpu_has_topoext) {
588 num_cache_leaves = find_num_cache_leaves(c);
589 } else if (c->extended_cpuid_level >= 0x80000006) {
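/* EDX[15:12] of CPUID 0x80000006 encodes the L3 associativity; non-zero means an L3 exists */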
590 if (cpuid_edx(0x80000006) & 0xf000)
591 num_cache_leaves = 4;
593 num_cache_leaves = 3;
597 unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
600 unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0;
601 unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
602 unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
603 unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
605 unsigned int cpu = c->cpu_index;
608 if (c->cpuid_level > 3) {
609 static int is_initialized;
611 if (is_initialized == 0) {
612 /* Init num_cache_leaves from boot CPU */
613 num_cache_leaves = find_num_cache_leaves(c);
618 * Whenever possible use cpuid(4), the deterministic cache
619 * parameters leaf, to find the cache details
621 for (i = 0; i < num_cache_leaves; i++) {
622 struct _cpuid4_info_regs this_leaf;
625 retval = cpuid4_cache_lookup_regs(i, &this_leaf);
627 switch (this_leaf.eax.split.level) {
629 if (this_leaf.eax.split.type ==
631 new_l1d = this_leaf.size/1024;
632 else if (this_leaf.eax.split.type ==
634 new_l1i = this_leaf.size/1024;
637 new_l2 = this_leaf.size/1024;
638 num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
639 index_msb = get_count_order(num_threads_sharing);
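/* the shared-cache ID is the APIC ID with the low sharing-thread bits masked off */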
640 l2_id = c->apicid & ~((1 << index_msb) - 1);
643 new_l3 = this_leaf.size/1024;
644 num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
645 index_msb = get_count_order(
646 num_threads_sharing);
647 l3_id = c->apicid & ~((1 << index_msb) - 1);
656 * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
659 if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
660 /* supports eax=2 call */
662 unsigned int regs[4];
663 unsigned char *dp = (unsigned char *)regs;
666 if (num_cache_leaves != 0 && c->x86 == 15)
669 /* Number of times to iterate */
670 n = cpuid_eax(2) & 0xFF;
672 for (i = 0 ; i < n ; i++) {
673 cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);
675 /* If bit 31 is set, this is an unknown format */
676 for (j = 0 ; j < 3 ; j++)
677 if (regs[j] & (1 << 31))
680 /* Byte 0 is the iteration count, not a descriptor */
681 for (j = 1 ; j < 16 ; j++) {
682 unsigned char des = dp[j];
685 /* look up this descriptor in the table */
686 while (cache_table[k].descriptor != 0) {
687 if (cache_table[k].descriptor == des) {
688 if (only_trace && cache_table[k].cache_type != LVL_TRACE)
690 switch (cache_table[k].cache_type) {
692 l1i += cache_table[k].size;
695 l1d += cache_table[k].size;
698 l2 += cache_table[k].size;
701 l3 += cache_table[k].size;
704 trace += cache_table[k].size;
726 per_cpu(cpu_llc_id, cpu) = l2_id;
733 per_cpu(cpu_llc_id, cpu) = l3_id;
737 c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));
744 /* pointer to _cpuid4_info array (for each cache leaf) */
745 static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info);
746 #define CPUID4_INFO_IDX(x, y) (&((per_cpu(ici_cpuid4_info, x))[y]))
750 static int __cpuinit cache_shared_amd_cpu_map_setup(unsigned int cpu, int index)
752 struct _cpuid4_info *this_leaf;
754 struct cpuinfo_x86 *c = &cpu_data(cpu);
759 for_each_cpu(i, cpu_llc_shared_mask(cpu)) {
760 if (!per_cpu(ici_cpuid4_info, i))
762 this_leaf = CPUID4_INFO_IDX(i, index);
763 for_each_cpu(sibling, cpu_llc_shared_mask(cpu)) {
764 if (!cpu_online(sibling))
766 set_bit(sibling, this_leaf->shared_cpu_map);
769 } else if ((c->x86 == 0x15) && ((index == 1) || (index == 2))) {
771 for_each_cpu(i, cpu_sibling_mask(cpu)) {
772 if (!per_cpu(ici_cpuid4_info, i))
774 this_leaf = CPUID4_INFO_IDX(i, index);
775 for_each_cpu(sibling, cpu_sibling_mask(cpu)) {
776 if (!cpu_online(sibling))
778 set_bit(sibling, this_leaf->shared_cpu_map);
786 static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
788 struct _cpuid4_info *this_leaf, *sibling_leaf;
789 unsigned long num_threads_sharing;
791 struct cpuinfo_x86 *c = &cpu_data(cpu);
793 if (c->x86_vendor == X86_VENDOR_AMD) {
794 if (cache_shared_amd_cpu_map_setup(cpu, index))
798 this_leaf = CPUID4_INFO_IDX(cpu, index);
799 num_threads_sharing = 1 + this_leaf->base.eax.split.num_threads_sharing;
801 if (num_threads_sharing == 1)
802 cpumask_set_cpu(cpu, to_cpumask(this_leaf->shared_cpu_map));
804 index_msb = get_count_order(num_threads_sharing);
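/* CPUs whose APIC IDs match above the low index_msb bits share this cache leaf */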
806 for_each_online_cpu(i) {
807 if (cpu_data(i).apicid >> index_msb ==
808 c->apicid >> index_msb) {
810 to_cpumask(this_leaf->shared_cpu_map));
811 if (i != cpu && per_cpu(ici_cpuid4_info, i)) {
813 CPUID4_INFO_IDX(i, index);
814 cpumask_set_cpu(cpu, to_cpumask(
815 sibling_leaf->shared_cpu_map));
821 static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
823 struct _cpuid4_info *this_leaf, *sibling_leaf;
826 this_leaf = CPUID4_INFO_IDX(cpu, index);
827 for_each_cpu(sibling, to_cpumask(this_leaf->shared_cpu_map)) {
828 sibling_leaf = CPUID4_INFO_IDX(sibling, index);
829 cpumask_clear_cpu(cpu,
830 to_cpumask(sibling_leaf->shared_cpu_map));
834 static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
838 static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
843 static void __cpuinit free_cache_attributes(unsigned int cpu)
847 for (i = 0; i < num_cache_leaves; i++)
848 cache_remove_shared_cpu_map(cpu, i);
850 kfree(per_cpu(ici_cpuid4_info, cpu));
851 per_cpu(ici_cpuid4_info, cpu) = NULL;
854 static void __cpuinit get_cpu_leaves(void *_retval)
856 int j, *retval = _retval, cpu = smp_processor_id();
858 /* Do cpuid and store the results */
859 for (j = 0; j < num_cache_leaves; j++) {
860 struct _cpuid4_info *this_leaf = CPUID4_INFO_IDX(cpu, j);
862 *retval = cpuid4_cache_lookup_regs(j, &this_leaf->base);
863 if (unlikely(*retval < 0)) {
866 for (i = 0; i < j; i++)
867 cache_remove_shared_cpu_map(cpu, i);
870 cache_shared_cpu_map_setup(cpu, j);
874 static int __cpuinit detect_cache_attributes(unsigned int cpu)
878 if (num_cache_leaves == 0)
881 per_cpu(ici_cpuid4_info, cpu) = kzalloc(
882 sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
883 if (per_cpu(ici_cpuid4_info, cpu) == NULL)
886 smp_call_function_single(cpu, get_cpu_leaves, &retval, true);
888 kfree(per_cpu(ici_cpuid4_info, cpu));
889 per_cpu(ici_cpuid4_info, cpu) = NULL;
895 #include <linux/kobject.h>
896 #include <linux/sysfs.h>
897 #include <linux/cpu.h>
899 /* pointer to kobject for cpuX/cache */
900 static DEFINE_PER_CPU(struct kobject *, ici_cache_kobject);
902 struct _index_kobject {
905 unsigned short index;
908 /* pointer to array of kobjects for cpuX/cache/indexY */
909 static DEFINE_PER_CPU(struct _index_kobject *, ici_index_kobject);
910 #define INDEX_KOBJECT_PTR(x, y) (&((per_cpu(ici_index_kobject, x))[y]))
912 #define show_one_plus(file_name, object, val) \
913 static ssize_t show_##file_name(struct _cpuid4_info *this_leaf, char *buf, \
916 return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \
919 show_one_plus(level, base.eax.split.level, 0);
920 show_one_plus(coherency_line_size, base.ebx.split.coherency_line_size, 1);
921 show_one_plus(physical_line_partition, base.ebx.split.physical_line_partition, 1);
922 show_one_plus(ways_of_associativity, base.ebx.split.ways_of_associativity, 1);
923 show_one_plus(number_of_sets, base.ecx.split.number_of_sets, 1);
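/* the +1 above undoes CPUID's minus-one encoding; the level field is reported as-is */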
925 static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf,
928 return sprintf(buf, "%luK\n", this_leaf->base.size / 1024);
931 static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
934 ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;
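/* space left in the sysfs page; two bytes are reserved for the trailing newline and NUL */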
938 const struct cpumask *mask;
940 mask = to_cpumask(this_leaf->shared_cpu_map);
942 cpulist_scnprintf(buf, len-2, mask) :
943 cpumask_scnprintf(buf, len-2, mask);
950 static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf,
953 return show_shared_cpu_map_func(leaf, 0, buf);
956 static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf,
959 return show_shared_cpu_map_func(leaf, 1, buf);
962 static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf,
965 switch (this_leaf->base.eax.split.type) {
966 case CACHE_TYPE_DATA:
967 return sprintf(buf, "Data\n");
968 case CACHE_TYPE_INST:
969 return sprintf(buf, "Instruction\n");
970 case CACHE_TYPE_UNIFIED:
971 return sprintf(buf, "Unified\n");
973 return sprintf(buf, "Unknown\n");
977 #define to_object(k) container_of(k, struct _index_kobject, kobj)
978 #define to_attr(a) container_of(a, struct _cache_attr, attr)
980 #define define_one_ro(_name) \
981 static struct _cache_attr _name = \
982 __ATTR(_name, 0444, show_##_name, NULL)
984 define_one_ro(level);
986 define_one_ro(coherency_line_size);
987 define_one_ro(physical_line_partition);
988 define_one_ro(ways_of_associativity);
989 define_one_ro(number_of_sets);
991 define_one_ro(shared_cpu_map);
992 define_one_ro(shared_cpu_list);
994 static struct attribute *default_attrs[] = {
997 &coherency_line_size.attr,
998 &physical_line_partition.attr,
999 &ways_of_associativity.attr,
1000 &number_of_sets.attr,
1002 &shared_cpu_map.attr,
1003 &shared_cpu_list.attr,
1007 #ifdef CONFIG_AMD_NB
1008 static struct attribute ** __cpuinit amd_l3_attrs(void)
1010 static struct attribute **attrs;
1016 n = ARRAY_SIZE(default_attrs);
1018 if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
1021 if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
1024 attrs = kzalloc(n * sizeof(struct attribute *), GFP_KERNEL);
1026 return attrs = default_attrs;
1028 for (n = 0; default_attrs[n]; n++)
1029 attrs[n] = default_attrs[n];
1031 if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
1032 attrs[n++] = &cache_disable_0.attr;
1033 attrs[n++] = &cache_disable_1.attr;
1036 if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
1037 attrs[n++] = &subcaches.attr;
1043 static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
1045 struct _cache_attr *fattr = to_attr(attr);
1046 struct _index_kobject *this_leaf = to_object(kobj);
1050 fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
1051 buf, this_leaf->cpu) :
1056 static ssize_t store(struct kobject *kobj, struct attribute *attr,
1057 const char *buf, size_t count)
1059 struct _cache_attr *fattr = to_attr(attr);
1060 struct _index_kobject *this_leaf = to_object(kobj);
1063 ret = fattr->store ?
1064 fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
1065 buf, count, this_leaf->cpu) :
1070 static const struct sysfs_ops sysfs_ops = {
1075 static struct kobj_type ktype_cache = {
1076 .sysfs_ops = &sysfs_ops,
1077 .default_attrs = default_attrs,
1080 static struct kobj_type ktype_percpu_entry = {
1081 .sysfs_ops = &sysfs_ops,
1084 static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
1086 kfree(per_cpu(ici_cache_kobject, cpu));
1087 kfree(per_cpu(ici_index_kobject, cpu));
1088 per_cpu(ici_cache_kobject, cpu) = NULL;
1089 per_cpu(ici_index_kobject, cpu) = NULL;
1090 free_cache_attributes(cpu);
1093 static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
1097 if (num_cache_leaves == 0)
1100 err = detect_cache_attributes(cpu);
1104 /* Allocate all required memory */
1105 per_cpu(ici_cache_kobject, cpu) =
1106 kzalloc(sizeof(struct kobject), GFP_KERNEL);
1107 if (unlikely(per_cpu(ici_cache_kobject, cpu) == NULL))
1110 per_cpu(ici_index_kobject, cpu) = kzalloc(
1111 sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
1112 if (unlikely(per_cpu(ici_index_kobject, cpu) == NULL))
1118 cpuid4_cache_sysfs_exit(cpu);
1122 static DECLARE_BITMAP(cache_dev_map, NR_CPUS);
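/* tracks which CPUs currently have the sysfs cache interface registered */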
1124 /* Add/Remove cache interface for CPU device */
1125 static int __cpuinit cache_add_dev(struct device *dev)
1127 unsigned int cpu = dev->id;
1129 struct _index_kobject *this_object;
1130 struct _cpuid4_info *this_leaf;
1133 retval = cpuid4_cache_sysfs_init(cpu);
1134 if (unlikely(retval < 0))
1137 retval = kobject_init_and_add(per_cpu(ici_cache_kobject, cpu),
1138 &ktype_percpu_entry,
1139 &dev->kobj, "%s", "cache");
1141 cpuid4_cache_sysfs_exit(cpu);
1145 for (i = 0; i < num_cache_leaves; i++) {
1146 this_object = INDEX_KOBJECT_PTR(cpu, i);
1147 this_object->cpu = cpu;
1148 this_object->index = i;
1150 this_leaf = CPUID4_INFO_IDX(cpu, i);
1152 ktype_cache.default_attrs = default_attrs;
1153 #ifdef CONFIG_AMD_NB
1154 if (this_leaf->base.nb)
1155 ktype_cache.default_attrs = amd_l3_attrs();
1157 retval = kobject_init_and_add(&(this_object->kobj),
1159 per_cpu(ici_cache_kobject, cpu),
1161 if (unlikely(retval)) {
1162 for (j = 0; j < i; j++)
1163 kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj));
1164 kobject_put(per_cpu(ici_cache_kobject, cpu));
1165 cpuid4_cache_sysfs_exit(cpu);
1168 kobject_uevent(&(this_object->kobj), KOBJ_ADD);
1170 cpumask_set_cpu(cpu, to_cpumask(cache_dev_map));
1172 kobject_uevent(per_cpu(ici_cache_kobject, cpu), KOBJ_ADD);
1176 static void __cpuinit cache_remove_dev(struct device *dev)
1178 unsigned int cpu = dev->id;
1181 if (per_cpu(ici_cpuid4_info, cpu) == NULL)
1183 if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map)))
1185 cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map));
1187 for (i = 0; i < num_cache_leaves; i++)
1188 kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
1189 kobject_put(per_cpu(ici_cache_kobject, cpu));
1190 cpuid4_cache_sysfs_exit(cpu);
1193 static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
1194 unsigned long action, void *hcpu)
1196 unsigned int cpu = (unsigned long)hcpu;
1199 dev = get_cpu_device(cpu);
1202 case CPU_ONLINE_FROZEN:
1206 case CPU_DEAD_FROZEN:
1207 cache_remove_dev(dev);
1213 static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = {
1214 .notifier_call = cacheinfo_cpu_callback,
1217 static int __cpuinit cache_sysfs_init(void)
1221 if (num_cache_leaves == 0)
1224 for_each_online_cpu(i) {
1226 struct device *dev = get_cpu_device(i);
1228 err = cache_add_dev(dev);
1232 register_hotcpu_notifier(&cacheinfo_cpu_notifier);
1236 device_initcall(cache_sysfs_init);