/*
 * include/linux/topology.h
 *
 * Written by: Matthew Dobson, IBM Corporation
 *
 * Copyright (C) 2002, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Send feedback to <colpatch@us.ibm.com>
 */
#ifndef _LINUX_TOPOLOGY_H
#define _LINUX_TOPOLOGY_H

#include <linux/arch_topology.h>
#include <linux/cpumask.h>
#include <linux/bitops.h>
#include <linux/mmzone.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <asm/topology.h>

#ifndef nr_cpus_node
#define nr_cpus_node(node) cpumask_weight(cpumask_of_node(node))
#endif

#define for_each_node_with_cpus(node)			\
	for_each_online_node(node)			\
		if (nr_cpus_node(node))
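
/*
 * Illustrative use of the iterator above (a sketch, not part of this
 * header's API surface): walk only the online nodes that actually have
 * CPUs. The pr_info() reporting is a hypothetical example.
 *
 *	int node;
 *
 *	for_each_node_with_cpus(node)
 *		pr_info("node %d: %u CPUs\n", node, nr_cpus_node(node));
 */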

int arch_update_cpu_topology(void);

/* Conform to ACPI 2.0 SLIT distance definitions */
#define LOCAL_DISTANCE		10
#define REMOTE_DISTANCE		20
#define DISTANCE_BITS		8
#ifndef node_distance
#define node_distance(from,to)	((from) == (to) ? LOCAL_DISTANCE : REMOTE_DISTANCE)
#endif
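
/*
 * SLIT distances are normalized so that LOCAL_DISTANCE (10) reads as
 * 1.0x the cost of a local access; a distance of 32, say, reads as
 * roughly 3.2x. A sketch of interpreting the value (illustrative; "a"
 * and "b" are arbitrary node ids):
 *
 *	// relative cost in tenths of a local access
 *	int cost_tenths = node_distance(a, b);	// 10 = local, 20+ = remote
 *
 * The fallback above only distinguishes local from remote; NUMA
 * architectures override node_distance() with firmware-provided values.
 */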

#ifndef RECLAIM_DISTANCE
/*
 * If the distance between nodes in a system is larger than RECLAIM_DISTANCE
 * (in whatever arch specific measurement units returned by node_distance())
 * and node_reclaim_mode is enabled then the VM will only call node_reclaim()
 * on nodes within this distance.
 */
#define RECLAIM_DISTANCE 30
#endif

/*
 * The following tunable allows platforms to override the default node
 * reclaim distance (RECLAIM_DISTANCE) if remote memory accesses are
 * sufficiently fast that the default value actually hurts
 * performance.
 *
 * AMD EPYC machines use this because even though the 2-hop distance
 * is 32 (3.2x slower than a local memory access) performance actually
 * *improves* if allowed to reclaim memory and load balance tasks
 * between NUMA nodes 2-hops apart.
 */
extern int __read_mostly node_reclaim_distance;
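
/*
 * A minimal sketch of the gating this tunable drives (illustrative; the
 * real checks live in the VM and scheduler, not in this header).
 * example_reclaim_allowed() is a hypothetical name.
 *
 *	static bool example_reclaim_allowed(int local, int remote)
 *	{
 *		// node_reclaim_distance defaults to RECLAIM_DISTANCE (30),
 *		// but platform code may raise it, as on AMD EPYC where the
 *		// 2-hop distance of 32 would otherwise be excluded.
 *		return node_distance(local, remote) <= node_reclaim_distance;
 *	}
 */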

#ifndef PENALTY_FOR_NODE_WITH_CPUS
#define PENALTY_FOR_NODE_WITH_CPUS	(1)
#endif

#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DECLARE_PER_CPU(int, numa_node);

#ifndef numa_node_id
/* Returns the number of the current Node. */
static inline int numa_node_id(void)
{
	return raw_cpu_read(numa_node);
}
#endif

#ifndef cpu_to_node
static inline int cpu_to_node(int cpu)
{
	return per_cpu(numa_node, cpu);
}
#endif

#ifndef set_numa_node
static inline void set_numa_node(int node)
{
	this_cpu_write(numa_node, node);
}
#endif

#ifndef set_cpu_numa_node
static inline void set_cpu_numa_node(int cpu, int node)
{
	per_cpu(numa_node, cpu) = node;
}
#endif
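
/*
 * Typical usage (an illustrative sketch): architecture boot code records
 * each CPU's node once with set_cpu_numa_node(), after which hot paths
 * read it back with a cheap per-cpu access. example_record_node() and
 * early_cpu_to_node() are hypothetical names here.
 *
 *	static void __init example_record_node(int cpu)
 *	{
 *		set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
 *	}
 *
 *	// later, on any CPU:
 *	int nid = numa_node_id();	// a raw_cpu_read(), no table walk
 */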

#else	/* !CONFIG_USE_PERCPU_NUMA_NODE_ID */

/* Returns the number of the current Node. */
#ifndef numa_node_id
static inline int numa_node_id(void)
{
	return cpu_to_node(raw_smp_processor_id());
}
#endif

#endif	/* [!]CONFIG_USE_PERCPU_NUMA_NODE_ID */

#ifdef CONFIG_HAVE_MEMORYLESS_NODES

/*
 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem().
 */
DECLARE_PER_CPU(int, _numa_mem_);

#ifndef set_numa_mem
static inline void set_numa_mem(int node)
{
	this_cpu_write(_numa_mem_, node);
}
#endif

#ifndef numa_mem_id
/* Returns the number of the nearest Node with memory */
static inline int numa_mem_id(void)
{
	return raw_cpu_read(_numa_mem_);
}
#endif

#ifndef cpu_to_mem
static inline int cpu_to_mem(int cpu)
{
	return per_cpu(_numa_mem_, cpu);
}
#endif

#ifndef set_cpu_numa_mem
static inline void set_cpu_numa_mem(int cpu, int node)
{
	per_cpu(_numa_mem_, cpu) = node;
}
#endif
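
/*
 * Typical usage (an illustrative sketch): when a CPU's own node may be
 * memoryless, allocations should target the nearest node that has
 * memory. example_alloc() is a hypothetical caller.
 *
 *	static struct page *example_alloc(gfp_t gfp, unsigned int order)
 *	{
 *		// numa_mem_id(), not numa_node_id(): the local node
 *		// might have no memory at all.
 *		return alloc_pages_node(numa_mem_id(), gfp, order);
 *	}
 */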

#else	/* !CONFIG_HAVE_MEMORYLESS_NODES */

#ifndef numa_mem_id
/* Returns the number of the nearest Node with memory */
static inline int numa_mem_id(void)
{
	return numa_node_id();
}
#endif

#ifndef cpu_to_mem
static inline int cpu_to_mem(int cpu)
{
	return cpu_to_node(cpu);
}
#endif

#endif	/* [!]CONFIG_HAVE_MEMORYLESS_NODES */

#if defined(topology_die_id) && defined(topology_die_cpumask)
#define TOPOLOGY_DIE_SYSFS
#endif
#if defined(topology_cluster_id) && defined(topology_cluster_cpumask)
#define TOPOLOGY_CLUSTER_SYSFS
#endif
#if defined(topology_book_id) && defined(topology_book_cpumask)
#define TOPOLOGY_BOOK_SYSFS
#endif
#if defined(topology_drawer_id) && defined(topology_drawer_cpumask)
#define TOPOLOGY_DRAWER_SYSFS
#endif

#ifndef topology_physical_package_id
#define topology_physical_package_id(cpu)	((void)(cpu), -1)
#endif
#ifndef topology_die_id
#define topology_die_id(cpu)			((void)(cpu), -1)
#endif
#ifndef topology_cluster_id
#define topology_cluster_id(cpu)		((void)(cpu), -1)
#endif
#ifndef topology_core_id
#define topology_core_id(cpu)			((void)(cpu), 0)
#endif
#ifndef topology_book_id
#define topology_book_id(cpu)			((void)(cpu), -1)
#endif
#ifndef topology_drawer_id
#define topology_drawer_id(cpu)			((void)(cpu), -1)
#endif
#ifndef topology_ppin
#define topology_ppin(cpu)			((void)(cpu), 0ull)
#endif
#ifndef topology_sibling_cpumask
#define topology_sibling_cpumask(cpu)		cpumask_of(cpu)
#endif
#ifndef topology_core_cpumask
#define topology_core_cpumask(cpu)		cpumask_of(cpu)
#endif
#ifndef topology_cluster_cpumask
#define topology_cluster_cpumask(cpu)		cpumask_of(cpu)
#endif
#ifndef topology_die_cpumask
#define topology_die_cpumask(cpu)		cpumask_of(cpu)
#endif
#ifndef topology_book_cpumask
#define topology_book_cpumask(cpu)		cpumask_of(cpu)
#endif
#ifndef topology_drawer_cpumask
#define topology_drawer_cpumask(cpu)		cpumask_of(cpu)
#endif
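
/*
 * The fallbacks above make the topology accessors safe on every
 * architecture: unknown ids read as -1 (the core id as 0) and unknown
 * cpumasks collapse to just the CPU itself. Illustrative use, for some
 * already-valid "cpu":
 *
 *	int sibling;
 *
 *	for_each_cpu(sibling, topology_sibling_cpumask(cpu))
 *		;	// without SMT info this visits only "cpu" itself
 */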

#if defined(CONFIG_SCHED_SMT) && !defined(cpu_smt_mask)
static inline const struct cpumask *cpu_smt_mask(int cpu)
{
	return topology_sibling_cpumask(cpu);
}
#endif

static inline const struct cpumask *cpu_cpu_mask(int cpu)
{
	return cpumask_of_node(cpu_to_node(cpu));
}

#ifdef CONFIG_NUMA
int sched_numa_find_nth_cpu(const struct cpumask *cpus, int cpu, int node);
extern const struct cpumask *sched_numa_hop_mask(unsigned int node, unsigned int hops);
#else
static __always_inline int sched_numa_find_nth_cpu(const struct cpumask *cpus, int cpu, int node)
{
	return cpumask_nth_and(cpu, cpus, cpu_online_mask);
}

static inline const struct cpumask *
sched_numa_hop_mask(unsigned int node, unsigned int hops)
{
	return ERR_PTR(-EOPNOTSUPP);
}
#endif	/* CONFIG_NUMA */
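
/*
 * Illustrative use of sched_numa_find_nth_cpu() (a sketch; "i" and
 * "home_node" are hypothetical caller state): pick the i-th CPU from a
 * mask in order of increasing distance from a home node, e.g. when
 * spreading IRQ affinity.
 *
 *	int cpu = sched_numa_find_nth_cpu(cpu_online_mask, i, home_node);
 *
 * Without CONFIG_NUMA the stub above degrades to a plain "i-th online
 * CPU in the mask" lookup via cpumask_nth_and().
 */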

/**
 * for_each_numa_hop_mask - iterate over cpumasks of increasing NUMA distance
 *                          from a given node.
 * @mask: the iteration variable.
 * @node: the NUMA node to start the search from.
 *
 * Requires rcu_lock to be held.
 *
 * Yields cpu_online_mask for @node == NUMA_NO_NODE.
 */
#define for_each_numa_hop_mask(mask, node)				       \
	for (unsigned int __hops = 0;					       \
	     mask = (node != NUMA_NO_NODE || __hops) ?			       \
		     sched_numa_hop_mask(node, __hops) :		       \
		     cpu_online_mask,					       \
	     !IS_ERR_OR_NULL(mask);					       \
	     __hops++)
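
/*
 * Illustrative walk (a sketch modeled on typical driver usage): visit
 * each CPU once, in order of increasing NUMA distance from "node".
 * Successive hop masks are cumulative, so CPUs already seen at a nearer
 * hop are filtered out with for_each_cpu_andnot(). do_something() is a
 * hypothetical helper.
 *
 *	const struct cpumask *mask, *prev = cpu_none_mask;
 *	int cpu;
 *
 *	rcu_read_lock();
 *	for_each_numa_hop_mask(mask, node) {
 *		for_each_cpu_andnot(cpu, mask, prev)
 *			do_something(cpu);
 *		prev = mask;
 *	}
 *	rcu_read_unlock();
 */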

#endif /* _LINUX_TOPOLOGY_H */