2 * lscpu - CPU architecture information helper
4 * Copyright (C) 2008 Cai Qian <qcai@redhat.com>
5 * Copyright (C) 2008 Karel Zak <kzak@redhat.com>
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
12 * This program is distributed in the hope that it would be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software Foundation,
19 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
30 #include <sys/utsname.h>
33 #include <sys/types.h>
47 #define _PATH_SYS_SYSTEM "/sys/devices/system"
48 #define _PATH_SYS_CPU _PATH_SYS_SYSTEM "/cpu"
49 #define _PATH_PROC_XEN "/proc/xen"
50 #define _PATH_PROC_XENCAP _PATH_PROC_XEN "/capabilities"
51 #define _PATH_PROC_CPUINFO "/proc/cpuinfo"
52 #define _PATH_PROC_PCIDEVS "/proc/bus/pci/devices"
53 #define _PATH_PROC_SYSINFO "/proc/sysinfo"
/* NOTE(review): this is an elided excerpt; each line keeps its original file
 * line number as a prefix, and the enum tags (VIRT_*, HYPER_*, MODE_*) are
 * declared on lines not shown here. */
/* Translatable names for the VIRT_* virtualization-type values, indexed by
 * enum constant. */
55 /* virtualization types */
61 const char *virt_types[] = {
62 [VIRT_NONE] = N_("none"),
63 [VIRT_PARA] = N_("para"),
64 [VIRT_FULL] = N_("full")
/* Vendor strings indexed by the HYPER_* hypervisor IDs (only two entries
 * visible in this excerpt). */
67 /* hypervisor vendors */
75 const char *hv_vendors[] = {
79 [HYPER_MSHV] = "Microsoft",
80 [HYPER_VMWARE] = "VMware"
/* CPU operation-mode bit flags (MODE_64BIT presumably defined on an elided
 * line -- TODO confirm). */
85 MODE_32BIT = (1 << 1),
/* Per-cache description; sharedmaps holds the unique shared_cpu_map sets. */
89 /* cache(s) description */
95 cpu_set_t **sharedmaps;
/* struct lscpu_desc: everything lscpu learns about the machine. */
98 /* global description */
104 char *virtflag; /* virtualization flag (vmx, svm) */
105 int hyper; /* hypervisor vendor ID */
106 int virtype; /* VIRT_PARA|FULL|NONE ? */
111 int mode; /* rm, lm or/and tm */
113 int ncpus; /* number of CPUs */
114 cpu_set_t *online; /* mask with online CPUs */
116 int nnodes; /* number of NUMA modes */
117 cpu_set_t **nodemaps; /* array with NUMA nodes */
119 /* books -- based on book_siblings (internal kernel map of cpuX's
120 * hardware threads within the same book */
121 int nbooks; /* number of all online books */
122 cpu_set_t **bookmaps; /* unique book_siblings */
124 /* sockets -- based on core_siblings (internal kernel map of cpuX's
125 * hardware threads within the same physical_package_id (socket)) */
126 int nsockets; /* number of all online sockets */
127 cpu_set_t **socketmaps; /* unique core_siblings */
129 /* cores -- based on thread_siblings (internel kernel map of cpuX's
130 * hardware threads within the same core as cpuX) */
131 int ncores; /* number of all online cores */
132 cpu_set_t **coremaps; /* unique thread_siblings */
134 int nthreads; /* number of online threads */
137 struct cpu_cache *caches;
/* File-scope state: optional --sysroot prefix length, the shared path
 * scratch buffer, and the kernel CPU-mask width in bits. */
140 static size_t sysrootlen;
141 static char pathbuf[PATH_MAX];
142 static int maxcpus; /* size in bits of kernel cpu mask */
/* is_cpu_online(desc, cpu): nonzero iff @cpu is set in desc->online;
 * safely returns 0 when desc or its online mask is NULL. */
144 #define is_cpu_online(_d, _cpu) \
145 ((_d) && (_d)->online ? \
146 CPU_ISSET_S((_cpu), CPU_ALLOC_SIZE(maxcpus), (_d)->online) : 0)
/* Forward declarations for the printf-style sysfs/procfs path helpers;
 * the format attribute lets the compiler check the path format strings. */
148 static FILE *path_fopen(const char *mode, int exit_on_err, const char *path, ...)
149 __attribute__ ((__format__ (__printf__, 3, 4)));
150 static void path_getstr(char *result, size_t len, const char *path, ...)
151 __attribute__ ((__format__ (__printf__, 3, 4)));
152 static int path_getnum(const char *path, ...)
153 __attribute__ ((__format__ (__printf__, 1, 2)));
154 static int path_exist(const char *path, ...)
155 __attribute__ ((__format__ (__printf__, 1, 2)));
156 static cpu_set_t *path_cpuset(const char *path, ...)
157 __attribute__ ((__format__ (__printf__, 1, 2)));
/* Column header strings for --parse output, indexed by the COL_* enum
 * (other entries elided in this excerpt). */
171 static const char *colnames[] =
175 [COL_SOCKET] = "Socket",
178 [COL_CACHE] = "Cache"
/* Map a (possibly length-limited) user-supplied column name to its COL_* id;
 * comparison is case-insensitive and requires an exact-length match.
 * Warns and (presumably on an elided line) returns an error on no match. */
182 static int column_name_to_id(const char *name, size_t namesz)
186 for (i = 0; i < ARRAY_SIZE(colnames); i++) {
187 const char *cn = colnames[i];
189 if (!strncasecmp(name, cn, namesz) && !*(cn + namesz))
192 warnx(_("unknown column: %s"), name);
/* Format @path (printf-style, args in @ap) into the global pathbuf and
 * return it. The two vsnprintf calls are presumably the sysroot /
 * no-sysroot branches of an if(sysrootlen) elided from this excerpt --
 * TODO confirm: the first writes past the preserved sysroot prefix. */
197 path_vcreate(const char *path, va_list ap)
200 vsnprintf(pathbuf + sysrootlen,
201 sizeof(pathbuf) - sysrootlen, path, ap);
203 vsnprintf(pathbuf, sizeof(pathbuf), path, ap);
/* Build the sysroot-aware path and fopen() it with @mode.
 * If open fails and @exit_on_error is set, die with a diagnostic;
 * otherwise the (possibly NULL) FILE* is returned to the caller. */
208 path_vfopen(const char *mode, int exit_on_error, const char *path, va_list ap)
211 const char *p = path_vcreate(path, ap);
214 if (!f && exit_on_error)
215 err(EXIT_FAILURE, _("error: cannot open %s"), p);
/* Variadic front-end for path_vfopen(): collects the printf-style path
 * arguments into a va_list and delegates. */
220 path_fopen(const char *mode, int exit_on_error, const char *path, ...)
226 fd = path_vfopen(mode, exit_on_error, path, ap);
/* Read the first line of the file named by the printf-style @path into
 * @result (at most @len bytes) and strip a trailing newline.
 * Exits on open or read failure (exit_on_error=1 passed to path_vfopen). */
233 path_getstr(char *result, size_t len, const char *path, ...)
239 fd = path_vfopen("r", 1, path, ap);
242 if (!fgets(result, len, fd))
243 err(EXIT_FAILURE, _("failed to read: %s"), pathbuf);
/* NOTE(review): assumes fgets produced a non-empty string; with len == 0
 * the result[len - 1] index would underflow -- TODO confirm callers. */
246 len = strlen(result);
247 if (result[len - 1] == '\n')
248 result[len - 1] = '\0';
/* Read a single decimal integer from the file named by the printf-style
 * @path; exits on open failure, read failure, or parse failure (the two
 * error branches presumably distinguish ferror from a scan miss). */
252 path_getnum(const char *path, ...)
259 fd = path_vfopen("r", 1, path, ap);
262 if (fscanf(fd, "%d", &result) != 1) {
264 err(EXIT_FAILURE, _("failed to read: %s"), pathbuf);
266 errx(EXIT_FAILURE, _("parse error: %s"), pathbuf);
/* Return 1 if the sysroot-aware path built from the printf-style @path
 * exists (access F_OK), 0 otherwise. */
273 path_exist(const char *path, ...)
279 p = path_vcreate(path, ap);
282 return access(p, F_OK) == 0;
/* Read a CPU set from the file at @path and parse it either as a
 * human-readable list ("0-3,7") when @islist is set, or as a hex mask
 * otherwise. Exits on any read/parse/allocation failure. */
286 path_cpuparse(int islist, const char *path, va_list ap)
/* Buffer sized at 7 bytes per possible CPU -- presumably enough for the
 * worst-case textual mask/list representation; TODO confirm the constant. */
290 size_t setsize, len = maxcpus * 7;
293 fd = path_vfopen("r", 1, path, ap);
295 if (!fgets(buf, len, fd))
296 err(EXIT_FAILURE, _("failed to read: %s"), pathbuf);
300 if (buf[len - 1] == '\n')
/* Dynamically-sized cpu_set_t covering all maxcpus bits. */
303 set = cpuset_alloc(maxcpus, &setsize, NULL);
305 err(EXIT_FAILURE, _("failed to callocate cpu set"));
308 if (cpulist_parse(buf, set, setsize))
309 errx(EXIT_FAILURE, _("failed to parse CPU list %s"), buf);
311 if (cpumask_parse(buf, set, setsize))
312 errx(EXIT_FAILURE, _("failed to parse CPU mask %s"), buf);
/* Convenience wrapper: parse the file at @path as a hex CPU mask. */
318 path_cpuset(const char *path, ...)
324 set = path_cpuparse(0, path, ap);
/* Convenience wrapper: parse the file at @path as a CPU list ("0-3,7"). */
331 path_cpulist(const char *path, ...)
337 set = path_cpuparse(1, path, ap);
343 /* Lookup a pattern and get the value from cpuinfo.
 * Matches a /proc/cpuinfo line of the form:
346 * "<pattern> : <key>"
 * On match, the value (whitespace-trimmed, presumably duplicated into
 * *@value on an elided line) is stored and nonzero is returned. */
348 int lookup(char *line, char *pattern, char **value)
351 int len = strlen(pattern);
/* Line must start with the pattern text. */
357 if (strncmp(line, pattern, len))
/* Skip whitespace after the pattern (then presumably a ':' separator on
 * an elided line), and whitespace after that. */
361 for (p = line + len; isspace(*p); p++);
368 for (++p; isspace(*p); p++);
/* Trim trailing whitespace by walking back from the end of the line. */
376 len = strlen(line) - 1;
377 for (p = line + len; isspace(*(p-1)); p--);
/* Compute the initial CPU op-mode bits (MODE_32BIT/MODE_64BIT) from
 * compile-time architecture macros. */
384 /* Don't init the mode for platforms where we are not able to
385 * detect that CPU supports 64-bit mode.
/* When reading from a --sysroot snapshot, the compile-time architecture of
 * this binary says nothing about the snapshot's CPU, so skip the defaults. */
393 /* reading info from any /{sys,proc} dump, don't mix it with
394 * information about our real CPU */
397 #if defined(__alpha__) || defined(__ia64__)
398 m |= MODE_64BIT; /* 64bit platforms only */
400 /* platforms with 64bit flag in /proc/cpuinfo, define
401 * 32bit default here */
402 #if defined(__i386__) || defined(__x86_64__) || \
403 defined(__s390x__) || defined(__s390__) || defined(__sparc_v9__)
/* Fill @desc with the architecture-independent basics: uname arch, CPU
 * count, /proc/cpuinfo key/value fields, op-mode flags, the kernel CPU-mask
 * width (maxcpus), and the online-CPU mask. Exits on fatal errors. */
410 read_basicinfo(struct lscpu_desc *desc)
412 FILE *fp = path_fopen("r", 1, _PATH_PROC_CPUINFO);
414 struct utsname utsbuf;
/* architecture */
417 if (uname(&utsbuf) == -1)
418 err(EXIT_FAILURE, _("error: uname failed"));
419 desc->arch = xstrdup(utsbuf.machine);
/* Count CPUs by probing /sys/devices/system/cpu/cpuN until one is missing
 * (ncpus presumably incremented in the elided loop body). */
422 while(path_exist(_PATH_SYS_SYSTEM "/cpu/cpu%d", desc->ncpus))
/* Scan /proc/cpuinfo once; each lookup() consumes the line on match, and
 * the empty ';' bodies are intentional -- only the first match wins.
 * Several keys are per-architecture aliases for the same field. */
426 while (fgets(buf, sizeof(buf), fp) != NULL) {
427 if (lookup(buf, "vendor", &desc->vendor)) ;
428 else if (lookup(buf, "vendor_id", &desc->vendor)) ;
429 else if (lookup(buf, "family", &desc->family)) ;
430 else if (lookup(buf, "cpu family", &desc->family)) ;
431 else if (lookup(buf, "model", &desc->model)) ;
432 else if (lookup(buf, "stepping", &desc->stepping)) ;
433 else if (lookup(buf, "cpu MHz", &desc->mhz)) ;
434 else if (lookup(buf, "flags", &desc->flags)) ; /* x86 */
435 else if (lookup(buf, "features", &desc->flags)) ; /* s390 */
436 else if (lookup(buf, "type", &desc->flags)) ; /* sparc64 */
437 else if (lookup(buf, "bogomips", &desc->bogomips)) ;
438 else if (lookup(buf, "bogomips per cpu", &desc->bogomips)) ; /* s390 */
443 desc->mode = init_mode();
/* Wrap the flags string in spaces so each flag can be matched as the
 * whole word " flag " rather than a substring. */
446 snprintf(buf, sizeof(buf), " %s ", desc->flags);
447 if (strstr(buf, " svm "))
448 desc->virtflag = strdup("svm");
449 else if (strstr(buf, " vmx "))
450 desc->virtflag = strdup("vmx");
451 if (strstr(buf, " lm "))
452 desc->mode |= MODE_32BIT | MODE_64BIT; /* x86_64 */
453 if (strstr(buf, " zarch "))
454 desc->mode |= MODE_32BIT | MODE_64BIT; /* s390x */
455 if (strstr(buf, " sun4v ") || strstr(buf, " sun4u "))
456 desc->mode |= MODE_32BIT | MODE_64BIT; /* sparc64 */
/* Determine maxcpus: prefer the kernel's own limit, fall back to the
 * runtime query on a live system, or a generous guess for snapshots. */
461 if (path_exist(_PATH_SYS_SYSTEM "/cpu/kernel_max"))
462 /* note that kernel_max is maximum index [NR_CPUS-1] */
463 maxcpus = path_getnum(_PATH_SYS_SYSTEM "/cpu/kernel_max") + 1;
465 else if (!sysrootlen)
466 /* the root is '/' so we are working with data from the current kernel */
467 maxcpus = get_max_number_of_cpus();
469 /* we are reading some /sys snapshot instead of the real /sys,
470 * let's use any crazy number... */
471 maxcpus = desc->ncpus > 2048 ? desc->ncpus : 2048;
473 /* get mask for online CPUs */
474 if (path_exist(_PATH_SYS_SYSTEM "/cpu/online")) {
475 size_t setsize = CPU_ALLOC_SIZE(maxcpus);
476 desc->online = path_cpulist(_PATH_SYS_SYSTEM "/cpu/online");
477 desc->nthreads = CPU_COUNT_S(setsize, desc->online);
/* Scan /proc/bus/pci/devices for a PCI device matching @vendor:@device;
 * returns nonzero on a match (return presumably on an elided line).
 * Open failure is non-fatal (exit_on_err=0). */
482 has_pci_device(int vendor, int device)
485 int num, fn, ven, dev;
488 f = path_fopen("r", 0, _PATH_PROC_PCIDEVS);
492 /* for more details about bus/pci/devices format see
493 * drivers/pci/proc.c in linux kernel
/* Each line: bus+devfn (hex), then vendor+device packed as one hex word. */
495 while(fscanf(f, "%02x%02x\t%04x%04x\t%*[^\n]",
496 &num, &fn, &ven, &dev) == 4) {
498 if (ven == vendor && dev == device)
508 #if defined(__x86_64__) || defined(__i386__)
/* CPUID leaf 0x40000000 is reserved for hypervisor self-identification. */
511 * This CPUID leaf returns the information about the hypervisor.
512 * EAX : maximum input value for CPUID supported by the hypervisor.
513 * EBX, ECX, EDX : Hypervisor vendor ID signature. E.g. VMwareVMware.
515 #define HYPERVISOR_INFO_LEAF 0x40000000
/* Thin inline-asm wrapper around the CPUID instruction (body elided). */
518 cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx,
519 unsigned int *ecx, unsigned int *edx)
522 #if defined(__PIC__) && defined(__i386__)
523 /* x86 PIC cannot clobber ebx -- gcc bitches */
/* Detect the hypervisor vendor by reading the 12-byte signature from
 * EBX:ECX:EDX of the hypervisor CPUID leaf into desc->hyper. */
540 read_hypervisor_cpuid(struct lscpu_desc *desc)
542 unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
543 char hyper_vendor_id[13];
545 memset(hyper_vendor_id, 0, sizeof(hyper_vendor_id));
547 cpuid(HYPERVISOR_INFO_LEAF, &eax, &ebx, &ecx, &edx);
548 memcpy(hyper_vendor_id + 0, &ebx, 4);
549 memcpy(hyper_vendor_id + 4, &ecx, 4);
550 memcpy(hyper_vendor_id + 8, &edx, 4);
551 hyper_vendor_id[12] = '\0';
/* All-zero registers mean no hypervisor answered the leaf -- bail out. */
553 if (!hyper_vendor_id[0])
556 if (!strncmp("XenVMMXenVMM", hyper_vendor_id, 12))
557 desc->hyper = HYPER_XEN;
558 else if (!strncmp("KVMKVMKVM", hyper_vendor_id, 9))
559 desc->hyper = HYPER_KVM;
560 else if (!strncmp("Microsoft Hv", hyper_vendor_id, 12))
561 desc->hyper = HYPER_MSHV;
562 else if (!strncmp("VMwareVMware", hyper_vendor_id, 12))
563 desc->hyper = HYPER_VMWARE;
566 #else /* ! __x86_64__ */
/* Non-x86 stub: CPUID is unavailable, so this is a no-op. */
568 read_hypervisor_cpuid(struct lscpu_desc *desc)
/* Determine hypervisor vendor and virtualization type: first via CPUID
 * (x86), then /proc/xen for Xen para-virt/dom0, then the Xen platform
 * PCI device (0x5853:0x0001) for full-virt guests on other arches. */
574 read_hypervisor(struct lscpu_desc *desc)
576 read_hypervisor_cpuid(desc);
/* CPUID found a vendor: a guest under a full hypervisor. */
580 desc->virtype = VIRT_FULL;
582 else if (path_exist(_PATH_PROC_XEN)) {
583 /* Xen para-virt or dom0 */
584 FILE *fd = path_fopen("r", 0, _PATH_PROC_XENCAP);
/* "control_d" in /proc/xen/capabilities identifies dom0. */
590 if (fscanf(fd, "%s", buf) == 1 &&
591 !strcmp(buf, "control_d"))
/* dom0 itself is not virtualized; other Xen domains here are para-virt. */
595 desc->virtype = dom0 ? VIRT_NONE : VIRT_PARA;
596 desc->hyper = HYPER_XEN;
598 } else if (has_pci_device(0x5853, 0x0001)) {
599 /* Xen full-virt on non-x86_64 */
600 desc->hyper = HYPER_XEN;
601 desc->virtype = VIRT_FULL;
/* Append @set to @ary unless an equal set is already present (in which
 * case @set is presumably freed on an elided line); @items tracks the
 * array length. Used to deduplicate sibling maps. */
605 /* add @set to the @ary, unnecesary set is deallocated. */
606 static int add_cpuset_to_array(cpu_set_t **ary, int *items, cpu_set_t *set)
609 size_t setsize = CPU_ALLOC_SIZE(maxcpus);
614 for (i = 0; i < *items; i++) {
615 if (CPU_EQUAL_S(setsize, set, ary[i]))
/* Read the sysfs topology sibling maps for CPU @num and fold them into
 * the per-desc deduplicated core/socket/book map arrays. On the first
 * call it also sizes the arrays and derives the fallback thread count. */
628 read_topology(struct lscpu_desc *desc, int num)
630 cpu_set_t *thread_siblings, *core_siblings, *book_siblings;
/* No topology directory for this CPU -- nothing to do. */
632 if (!path_exist(_PATH_SYS_CPU "/cpu%d/topology/thread_siblings", num))
635 thread_siblings = path_cpuset(_PATH_SYS_CPU
636 "/cpu%d/topology/thread_siblings", num);
637 core_siblings = path_cpuset(_PATH_SYS_CPU
638 "/cpu%d/topology/core_siblings", num);
/* book_siblings exists only on s390-style book topologies. */
639 book_siblings = NULL;
640 if (path_exist(_PATH_SYS_CPU "/cpu%d/topology/book_siblings", num)) {
641 book_siblings = path_cpuset(_PATH_SYS_CPU
642 "/cpu%d/topology/book_siblings", num);
/* First CPU seen: allocate the map arrays and estimate counts. */
645 if (!desc->coremaps) {
646 int nbooks, nsockets, ncores, nthreads;
647 size_t setsize = CPU_ALLOC_SIZE(maxcpus);
649 /* threads within one core */
650 nthreads = CPU_COUNT_S(setsize, thread_siblings);
651 /* cores within one socket */
652 ncores = CPU_COUNT_S(setsize, core_siblings) / nthreads;
653 /* number of sockets within one book.
654 * Because of odd / non-present cpu maps and to keep
655 * calculation easy we make sure that nsockets and
656 * nbooks is at least 1.
/* GNU ?: elvis operator: use 1 when the quotient is 0. */
658 nsockets = desc->ncpus / nthreads / ncores ?: 1;
659 /* number of books */
660 nbooks = desc->ncpus / nthreads / ncores / nsockets ?: 1;
662 /* all threads, see also read_basicinfo()
663 * -- fallback for kernels without
664 * /sys/devices/system/cpu/online.
667 desc->nthreads = nbooks * nsockets * ncores * nthreads;
668 /* For each map we make sure that it can have up to ncpus
669 * entries. This is because we cannot reliably calculate the
670 * number of cores, sockets and books on all architectures.
671 * E.g. completely virtualized architectures like s390 may
672 * have multiple sockets of different sizes.
674 desc->coremaps = xcalloc(desc->ncpus, sizeof(cpu_set_t *));
675 desc->socketmaps = xcalloc(desc->ncpus, sizeof(cpu_set_t *));
677 desc->bookmaps = xcalloc(desc->ncpus, sizeof(cpu_set_t *));
/* Deduplicate: each unique sibling map is stored once, bumping the count. */
680 add_cpuset_to_array(desc->socketmaps, &desc->nsockets, core_siblings);
681 add_cpuset_to_array(desc->coremaps, &desc->ncores, thread_siblings);
683 add_cpuset_to_array(desc->bookmaps, &desc->nbooks, book_siblings);
/* qsort() comparator for struct cpu_cache by name.
 * Arguments to strcmp are deliberately swapped (c2 before c1) so the
 * caches sort in descending name order; callers then iterate the array
 * backwards to print L1d, L1i, L2, ... */
687 cachecmp(const void *a, const void *b)
689 struct cpu_cache *c1 = (struct cpu_cache *) a;
690 struct cpu_cache *c2 = (struct cpu_cache *) b;
692 return strcmp(c2->name, c1->name);
/* Read the sysfs cache descriptions (index0..indexN) for CPU @num:
 * name ("L1d"/"L1i"/"L2"...), size string, and the deduplicated
 * shared_cpu_map sets describing which CPUs share each cache. */
696 read_cache(struct lscpu_desc *desc, int num)
/* First call: count the index%d directories to size desc->caches. */
701 if (!desc->ncaches) {
702 while(path_exist(_PATH_SYS_SYSTEM "/cpu/cpu%d/cache/index%d",
709 desc->caches = xcalloc(desc->ncaches, sizeof(*desc->caches));
711 for (i = 0; i < desc->ncaches; i++) {
712 struct cpu_cache *ca = &desc->caches[i];
/* This CPU may expose fewer cache levels than the first one. */
715 if (!path_exist(_PATH_SYS_SYSTEM "/cpu/cpu%d/cache/index%d",
/* Cache type: "Data" -> 'd' suffix, "Instruction" -> 'i' (suffix
 * assignments presumably on elided lines), unified -> no suffix. */
722 path_getstr(buf, sizeof(buf),
723 _PATH_SYS_CPU "/cpu%d/cache/index%d/type",
725 if (!strcmp(buf, "Data"))
727 else if (!strcmp(buf, "Instruction"))
/* Compose the display name, e.g. "L1d" or "L2". */
733 level = path_getnum(_PATH_SYS_CPU "/cpu%d/cache/index%d/level",
736 snprintf(buf, sizeof(buf), "L%d%c", level, type);
738 snprintf(buf, sizeof(buf), "L%d", level);
740 ca->name = xstrdup(buf);
/* Human-readable size string, e.g. "32K". */
743 path_getstr(buf, sizeof(buf),
744 _PATH_SYS_CPU "/cpu%d/cache/index%d/size",
746 ca->size = xstrdup(buf);
749 /* information about how CPUs share different caches */
750 map = path_cpuset(_PATH_SYS_CPU "/cpu%d/cache/index%d/shared_cpu_map",
754 ca->sharedmaps = xcalloc(desc->ncpus, sizeof(cpu_set_t *));
755 add_cpuset_to_array(ca->sharedmaps, &ca->nsharedmaps, map);
/* Count NUMA nodes under /sys/devices/system/node and read each node's
 * cpumap into desc->nodemaps. */
760 read_nodes(struct lscpu_desc *desc)
764 /* number of NUMA node */
765 while (path_exist(_PATH_SYS_SYSTEM "/node/node%d", desc->nnodes))
771 desc->nodemaps = xcalloc(desc->nnodes, sizeof(cpu_set_t *));
773 /* information about how nodes share different CPUs */
774 for (i = 0; i < desc->nnodes; i++)
775 desc->nodemaps[i] = path_cpuset(
776 _PATH_SYS_SYSTEM "/node/node%d/cpumap",
/* Print one cell of the --parse output for CPU @i and column @col: the
 * ID is the index of the first core/socket/node/book/cache map that
 * contains CPU @i (each branch presumably part of a switch on @col,
 * printing j/x on elided lines). */
781 print_parsable_cell(struct lscpu_desc *desc, int i, int col, int compatible)
784 size_t setsize = CPU_ALLOC_SIZE(maxcpus);
791 for (j = 0; j < desc->ncores; j++) {
792 if (CPU_ISSET_S(i, setsize, desc->coremaps[j])) {
799 for (j = 0; j < desc->nsockets; j++) {
800 if (CPU_ISSET_S(i, setsize, desc->socketmaps[j])) {
807 for (j = 0; j < desc->nnodes; j++) {
808 if (CPU_ISSET_S(i, setsize, desc->nodemaps[j])) {
815 for (j = 0; j < desc->nbooks; j++) {
816 if (CPU_ISSET_S(i, setsize, desc->bookmaps[j])) {
/* Caches print in reverse (descending-name) order, separated by ','
 * in compatible mode and ':' otherwise. */
823 for (j = desc->ncaches - 1; j >= 0; j--) {
824 struct cpu_cache *ca = &desc->caches[j];
827 for (x = 0; x < ca->nsharedmaps; x++) {
828 if (CPU_ISSET_S(i, setsize, ca->sharedmaps[x])) {
834 putchar(compatible ? ',' : ':');
841 * We support two formats:
843 * 1) "compatible" -- this format is compatible with the original lscpu(1)
844 * output and it contains fixed set of the columns. The CACHE columns are at
845 * the end of the line and the CACHE is not printed if the number of the caches
846 * is zero. The CACHE columns are separated by two commas, for example:
849 * # CPU,Core,Socket,Node,,L1d,L1i,L2
853 * 2) "user defined output" -- this format prints always all columns without
854 * special prefix for CACHE column. If there are not CACHEs then the column is
855 * empty and the header "Cache" is printed rather than a real name of the cache.
856 * The CACHE columns are separated by ':'.
858 * $ lscpu --parse=CPU,CORE,SOCKET,NODE,CACHE
859 * # CPU,Core,Socket,Node,L1d:L1i:L2
/* Emit the machine-parsable output: explanatory banner, the '#' header
 * line, then one line per online CPU with one cell per column. */
864 print_parsable(struct lscpu_desc *desc, int cols[], int ncols, int compatible)
869 "# The following is the parsable format, which can be fed to other\n"
870 "# programs. Each different item in every column has an unique ID\n"
871 "# starting from zero.\n"));
/* Header row: cache columns expand into per-cache names (reverse order);
 * other columns print their colnames[] entry. */
874 for (i = 0; i < ncols; i++) {
875 if (cols[i] == COL_CACHE) {
876 if (compatible && !desc->ncaches)
880 if (compatible && i != 0)
882 for (c = desc->ncaches - 1; c >= 0; c--) {
883 printf("%s", desc->caches[c].name);
885 putchar(compatible ? ',' : ':');
888 fputs(colnames[cols[i]], stdout);
892 fputs(colnames[cols[i]], stdout);
/* Data rows: offline CPUs are skipped entirely. */
897 for (i = 0; i < desc->ncpus; i++) {
898 if (desc->online && !is_cpu_online(desc, i))
900 for (c = 0; c < ncols; c++) {
901 if (compatible && cols[c] == COL_CACHE) {
909 print_parsable_cell(desc, i, cols[c], compatible);
/* Helpers for the human-readable view: left-aligned 23-char key column
 * followed by a string (print_s) or int (print_n) value. */
916 /* output formats "<key> <value>"*/
917 #define print_s(_key, _val) printf("%-23s%s\n", _key, _val)
918 #define print_n(_key, _val) printf("%-23s%d\n", _key, _val)
/* Print a CPU set under @key either as a hex mask (with "0x" prefix) or
 * as a human-readable list, depending on @hex. The 7-bytes-per-CPU
 * buffer sizing matches path_cpuparse(). */
921 print_cpuset(const char *key, cpu_set_t *set, int hex)
923 size_t setsize = CPU_ALLOC_SIZE(maxcpus);
924 size_t setbuflen = 7 * maxcpus;
925 char setbuf[setbuflen], *p;
928 p = cpumask_create(setbuf, setbuflen, set, setsize);
929 printf("%-23s0x%s\n", key, p);
931 p = cpulist_create(setbuf, setbuflen, set, setsize);
/* Emit the default human-readable report: one "Key: value" line per
 * known attribute, printing only what was actually detected. */
938 print_readable(struct lscpu_desc *desc, int hex)
942 size_t setsize = CPU_ALLOC_SIZE(maxcpus);
944 print_s(_("Architecture:"), desc->arch);
/* Render the MODE_* bits as "32-bit, 64-bit" (trailing ", " presumably
 * stripped on an elided line). */
947 char buf[64], *p = buf;
949 if (desc->mode & MODE_32BIT) {
950 strcpy(p, "32-bit, ");
953 if (desc->mode & MODE_64BIT) {
954 strcpy(p, "64-bit, ");
958 print_s(_("CPU op-mode(s):"), buf);
/* Endianness is a compile-time property of this build of lscpu. */
960 #if !defined(WORDS_BIGENDIAN)
961 print_s(_("Byte Order:"), "Little Endian");
963 print_s(_("Byte Order:"), "Big Endian");
965 print_n(_("CPU(s):"), desc->ncpus);
968 print_cpuset(hex ? _("On-line CPU(s) mask:") :
969 _("On-line CPU(s) list:"),
/* Only show the off-line set when some CPUs really are offline. */
972 if (desc->online && CPU_COUNT_S(setsize, desc->online) != desc->ncpus) {
975 /* Linux kernel provides cpuset of off-line CPUs that contains
976 * all configured CPUs (see /sys/devices/system/cpu/offline),
977 * but want to print real (present in system) off-line CPUs only.
979 set = cpuset_alloc(maxcpus, NULL, NULL);
981 err(EXIT_FAILURE, _("failed to callocate cpu set"));
982 CPU_ZERO_S(setsize, set);
983 for (i = 0; i < desc->ncpus; i++) {
984 if (!is_cpu_online(desc, i))
985 CPU_SET_S(i, setsize, set);
987 print_cpuset(hex ? _("Off-line CPU(s) mask:") :
988 _("Off-line CPU(s) list:"),
/* Topology summary (threads/cores/sockets/books). */
993 if (desc->nsockets) {
994 int cores_per_socket, sockets_per_book, books;
996 cores_per_socket = sockets_per_book = books = 0;
997 /* s390 detects its cpu topology via /proc/sysinfo, if present.
998 * Using simply the cpu topology masks in sysfs will not give
999 * usable results since everything is virtualized. E.g.
1000 * virtual core 0 may have only 1 cpu, but virtual core 2 may
1002 * If the cpu topology is not exported (e.g. 2nd level guest)
1003 * fall back to old calculation scheme.
1005 if (path_exist(_PATH_PROC_SYSINFO)) {
1006 FILE *fd = path_fopen("r", 0, _PATH_PROC_SYSINFO);
1010 while (fgets(buf, sizeof(buf), fd) != NULL) {
1011 if (sscanf(buf, "CPU Topology SW:%d%d%d%d%d%d",
1012 &t0, &t1, &t2, &books, &sockets_per_book,
1013 &cores_per_socket) == 6)
1017 print_n(_("Thread(s) per core:"), desc->nthreads / desc->ncores);
/* GNU ?: elvis operator: prefer /proc/sysinfo values, fall back to the
 * sysfs-derived ratios. */
1018 print_n(_("Core(s) per socket:"),
1019 cores_per_socket ?: desc->ncores / desc->nsockets);
1021 print_n(_("Socket(s) per book:"),
1022 sockets_per_book ?: desc->nsockets / desc->nbooks);
1023 print_n(_("Book(s):"), books ?: desc->nbooks);
1025 print_n(_("Socket(s):"), sockets_per_book ?: desc->nsockets);
1029 print_n(_("NUMA node(s):"), desc->nnodes);
/* Optional /proc/cpuinfo fields (each presumably guarded by an elided
 * NULL check on the desc member). */
1031 print_s(_("Vendor ID:"), desc->vendor);
1033 print_s(_("CPU family:"), desc->family);
1035 print_s(_("Model:"), desc->model);
1037 print_s(_("Stepping:"), desc->stepping);
1039 print_s(_("CPU MHz:"), desc->mhz);
1041 print_s(_("BogoMIPS:"), desc->bogomips);
1042 if (desc->virtflag) {
1043 if (!strcmp(desc->virtflag, "svm"))
1044 print_s(_("Virtualization:"), "AMD-V");
1045 else if (!strcmp(desc->virtflag, "vmx"))
1046 print_s(_("Virtualization:"), "VT-x");
1049 print_s(_("Hypervisor vendor:"), hv_vendors[desc->hyper]);
1050 print_s(_("Virtualization type:"), virt_types[desc->virtype]);
/* Cache lines print in reverse array order (see cachecmp). */
1052 if (desc->ncaches) {
1056 for (i = desc->ncaches - 1; i >= 0; i--) {
1057 snprintf(buf, sizeof(buf),
1058 _("%s cache:"), desc->caches[i].name);
1059 print_s(buf, desc->caches[i].size);
1063 for (i = 0; i < desc->nnodes; i++) {
1064 snprintf(buf, sizeof(buf), _("NUMA node%d CPU(s):"), i);
1065 print_cpuset(buf, desc->nodemaps[i], hex);
/* Print the usage text to @out and exit: success when writing to stdout
 * (e.g. --help), failure when writing to stderr (bad invocation). */
1069 static void __attribute__((__noreturn__)) usage(FILE *out)
1071 fputs(_("\nUsage:\n"), out);
1073 _(" %s [options]\n"), program_invocation_short_name);
1075 fputs(_("\nOptions:\n"), out);
1076 fputs(_(" -h, --help print this help\n"
1077 " -p, --parse <list> print out a parsable instead of a readable format\n"
1078 " -s, --sysroot <dir> use directory DIR as system root\n"
1079 " -x, --hex print hexadecimal masks rather than lists of CPUs\n\n"), out);
1081 exit(out == stderr ? EXIT_FAILURE : EXIT_SUCCESS);
/* Entry point: parse options, gather CPU/topology/cache/NUMA/hypervisor
 * data into a stack-allocated lscpu_desc, then print either the parsable
 * or the readable report. */
1084 int main(int argc, char *argv[])
1086 struct lscpu_desc _desc, *desc = &_desc;
1087 int parsable = 0, c, i, hex = 0;
1088 int columns[ARRAY_SIZE(colnames)], ncolumns = 0;
1091 static const struct option longopts[] = {
1092 { "help", no_argument, 0, 'h' },
1093 { "parse", optional_argument, 0, 'p' },
1094 { "sysroot", required_argument, 0, 's' },
1095 { "hex", no_argument, 0, 'x' },
/* Standard gettext initialization for translated messages. */
1099 setlocale(LC_ALL, "");
1100 bindtextdomain(PACKAGE, LOCALEDIR);
1101 textdomain(PACKAGE);
1103 while ((c = getopt_long(argc, argv, "hp::s:x", longopts, NULL)) != -1) {
/* -p with an argument: user-chosen column list. */
1112 ncolumns = string_to_idarray(optarg,
1113 columns, ARRAY_SIZE(columns),
1116 return EXIT_FAILURE;
/* Bare -p: the fixed "compatible" column set. */
1118 columns[ncolumns++] = COL_CPU;
1119 columns[ncolumns++] = COL_CORE;
1120 columns[ncolumns++] = COL_SOCKET;
1121 columns[ncolumns++] = COL_NODE;
1122 columns[ncolumns++] = COL_CACHE;
/* -s: copy the sysroot prefix into pathbuf for path_vcreate(). */
1127 sysrootlen = strlen(optarg);
1128 strncpy(pathbuf, optarg, sizeof(pathbuf));
1129 pathbuf[sizeof(pathbuf) - 1] = '\0';
1139 memset(desc, 0, sizeof(*desc));
1141 read_basicinfo(desc);
/* Per-CPU passes over online CPUs only. */
1143 for (i = 0; i < desc->ncpus; i++) {
1144 if (desc->online && !is_cpu_online(desc, i))
1146 read_topology(desc, i);
1147 read_cache(desc, i);
/* Sort caches by name, descending (see cachecmp). */
1150 qsort(desc->caches, desc->ncaches, sizeof(struct cpu_cache), cachecmp);
1154 read_hypervisor(desc);
1158 print_parsable(desc, columns, ncolumns, compatible);
1160 print_readable(desc, hex);
1162 return EXIT_SUCCESS;