static int topology_enabled = 1;
static DECLARE_WORK(topology_work, topology_work_fn);
-/* topology_lock protects the socket and book linked lists */
-static DEFINE_SPINLOCK(topology_lock);
+/*
+ * Socket/Book linked lists and per_cpu(cpu_topology) updates are
+ * protected by "sched_domains_mutex".
+ */
static struct mask_info socket_info;
static struct mask_info book_info;
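
/*
 * Illustrative sketch, not part of this patch: the writers are expected to
 * run only via the scheduler's domain rebuild path, which already holds
 * sched_domains_mutex, roughly:
 *
 *	partition_sched_domains()	takes sched_domains_mutex (kernel/sched)
 *	  arch_update_cpu_topology()	arch callback
 *	    update_cpu_masks()		below, no locking of its own
 *
 * The exact call chain is an assumption based on the mainline scheduler
 * code and is not shown in this excerpt.
 */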
static void tl_to_masks(struct sysinfo_15_1_x *info)
{
struct cpuid cpu_id;
- spin_lock_irq(&topology_lock);
get_cpu_id(&cpu_id);
clear_masks();
switch (cpu_id.machine) {
default:
__tl_to_masks_generic(info);
}
- spin_unlock_irq(&topology_lock);
}
static void topology_update_polarization_simple(void)
static void update_cpu_masks(void)
{
- unsigned long flags;
int cpu;
- spin_lock_irqsave(&topology_lock, flags);
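	/*
	 * Rebuild the per-CPU topology masks from the socket/book lists;
	 * callers serialize via sched_domains_mutex, so no local lock is taken.
	 */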
for_each_possible_cpu(cpu) {
per_cpu(cpu_topology, cpu).thread_mask = cpu_thread_map(cpu);
per_cpu(cpu_topology, cpu).core_mask = cpu_group_map(&socket_info, cpu);
		if (!MACHINE_HAS_TOPOLOGY) {
			/* No topology information available: fall back to trivial ids */
			per_cpu(cpu_topology, cpu).book_id = cpu;
}
}
- spin_unlock_irqrestore(&topology_lock, flags);
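	/* Let the active NUMA mode (e.g. the emulation code below) pick up the new masks */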
numa_update_cpu_topology();
}
static int emu_nodes = 1;
/* NUMA stripe size */
static unsigned long emu_size;
+
+/*
+ * Core to node pinning information updates are protected by
+ * "sched_domains_mutex".
+ */
/* Pinned core to node mapping */
static int cores_to_node_id[CONFIG_NR_CPUS];
/* Total number of pinned cores */
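
/*
 * Illustrative only, not part of this patch: with the table above, resolving
 * the node a core is pinned to is a plain array lookup; the helper name is
 * hypothetical.
 *
 *	static int emu_core_to_node(int core_id)
 *	{
 *		return cores_to_node_id[core_id];
 *	}
 */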
/*
 * Transfer physical topology into a NUMA topology and modify CPU masks
* according to the NUMA topology.
*
- * This function is called under the CPU hotplug lock.
+ * Must be called with "sched_domains_mutex" lock held.
*/
static void emu_update_cpu_topology(void)
{
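	/*
	 * Like update_cpu_masks() above, no lock is taken here; the caller is
	 * expected to hold sched_domains_mutex.
	 */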