x86_cpuinit.setup_percpu_clockev();
wmb();
- cpu_startup_entry(CPUHP_ONLINE);
+ cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
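+
+/**
+ * topology_update_package_map - Set up the logical to physical package map
+ * @apicid:	The APIC id of the CPU for which the package is looked up
+ * @cpu:	The CPU number for which the mapping is established
+ *
+ * The physical package id is the part of the APIC id above the core id
+ * bits. Returns 0 on success, -EINVAL if that physical package id is out
+ * of range and -ENOSPC if no free logical package id is left.
+ */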
+int topology_update_package_map(unsigned int apicid, unsigned int cpu)
+{
+ unsigned int new, pkg = apicid >> boot_cpu_data.x86_coreid_bits;
+
+ /* Called from early boot? The package map is not yet allocated. */
+ if (!physical_package_map)
+ return 0;
+
+ if (pkg >= max_physical_pkg_id)
+ return -EINVAL;
+
+ /* Have we seen this physical package already? Then reuse its mapping. */
+ if (test_and_set_bit(pkg, physical_package_map))
+ goto found;
+
+ if (pkg < __max_logical_packages) {
+ set_bit(pkg, logical_package_map);
+ physical_to_logical_pkg[pkg] = pkg;
+ goto found;
+ }
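+ /* Physical id exceeds the logical space, grab the first free logical id. */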
+ new = find_first_zero_bit(logical_package_map, __max_logical_packages);
+ if (new >= __max_logical_packages) {
+ physical_to_logical_pkg[pkg] = -1;
+ pr_warn("APIC(%x) Package %u exceeds logical package map\n",
+ apicid, pkg);
+ return -ENOSPC;
+ }
+ set_bit(new, logical_package_map);
+ pr_info("APIC(%x) Converting physical %u to logical package %u\n",
+ apicid, pkg, new);
+ physical_to_logical_pkg[pkg] = new;
+
+found:
+ cpu_data(cpu).logical_proc_id = physical_to_logical_pkg[pkg];
+ return 0;
+}
+
+/**
+ * topology_phys_to_logical_pkg - Map a physical package id to a logical one
+ * @phys_pkg:	The physical package id to map
+ *
+ * Returns logical package id or -1 if not found
+ */
+int topology_phys_to_logical_pkg(unsigned int phys_pkg)
+{
+ if (phys_pkg >= max_physical_pkg_id)
+ return -1;
+ return physical_to_logical_pkg[phys_pkg];
+}
+EXPORT_SYMBOL(topology_phys_to_logical_pkg);
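+
+/*
+ * Usage sketch (hypothetical caller, not part of this patch): the
+ * logical package id is dense, so it can index a per-package array
+ * directly, e.g. with per_pkg_counts sized __max_logical_packages:
+ *
+ *	int lpkg = topology_phys_to_logical_pkg(phys_pkg);
+ *
+ *	if (lpkg >= 0)
+ *		per_pkg_counts[lpkg]++;
+ */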
+
+static void __init smp_init_package_map(void)
+{
+ unsigned int ncpus, cpu;
+ size_t size;
+
+ /*
+ * Today neither Intel nor AMD support heterogeneous systems. That
+ * might change in the future....
+ */
+ ncpus = boot_cpu_data.x86_max_cores * smp_num_siblings;
+ __max_logical_packages = DIV_ROUND_UP(nr_cpu_ids, ncpus);
+
+ /*
+ * Possibly larger than what we need as the number of apic ids per
+ * package can be smaller than the actual used apic ids.
+ */
+ max_physical_pkg_id = DIV_ROUND_UP(MAX_LOCAL_APIC, ncpus);
+ size = max_physical_pkg_id * sizeof(unsigned int);
+ physical_to_logical_pkg = kmalloc(size, GFP_KERNEL);
+ memset(physical_to_logical_pkg, 0xff, size);
+ size = BITS_TO_LONGS(max_physical_pkg_id) * sizeof(unsigned long);
+ physical_package_map = kzalloc(size, GFP_KERNEL);
+ size = BITS_TO_LONGS(__max_logical_packages) * sizeof(unsigned long);
+ logical_package_map = kzalloc(size, GFP_KERNEL);
+
+ pr_info("Max logical packages: %u\n", __max_logical_packages);
+
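+ /*
+ * Establish the mapping for all present CPUs. A CPU whose package
+ * cannot be mapped is disabled and removed from the present and
+ * possible masks.
+ */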
+ for_each_present_cpu(cpu) {
+ unsigned int apicid = apic->cpu_present_to_apicid(cpu);
+
+ if (apicid == BAD_APICID || !apic->apic_id_valid(apicid))
+ continue;
+ if (!topology_update_package_map(apicid, cpu))
+ continue;
+ pr_warn("CPU %u APICId %x disabled\n", cpu, apicid);
+ per_cpu(x86_bios_cpu_apicid, cpu) = BAD_APICID;
+ set_cpu_possible(cpu, false);
+ set_cpu_present(cpu, false);
+ }
+}
+
void __init smp_store_boot_cpu_info(void)
{
int id = 0; /* CPU 0 */
rcu_init_percpu_data(cpu, rsp);
}
- raw_spin_unlock_irqrestore(&rnp->lock, flags);
+ #ifdef CONFIG_HOTPLUG_CPU
+ /*
+ * The CPU is exiting the idle loop into the arch_cpu_idle_dead()
+ * function. We now remove it from the rcu_node tree's ->qsmaskinitnext
+ * bit masks.
+ */
+ static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp)
+ {
+ unsigned long flags;
+ unsigned long mask;
+ struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
+ struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */
+
+ if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
+ return;
+
+ /* Remove outgoing CPU from mask in the leaf rcu_node structure. */
+ mask = rdp->grpmask;
+ raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
+ rnp->qsmaskinitnext &= ~mask;
+ raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+ }
+
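+ /*
+ * Invoked on the outgoing CPU: report a quiescent state for any
+ * half-done expedited RCU-sched grace period, then remove the CPU
+ * from each flavor's leaf rcu_node ->qsmaskinitnext mask.
+ */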
+ void rcu_report_dead(unsigned int cpu)
+ {
+ struct rcu_state *rsp;
+
+ /* QS for any half-done expedited RCU-sched GP. */
+ preempt_disable();
+ rcu_report_exp_rdp(&rcu_sched_state,
+ this_cpu_ptr(rcu_sched_state.rda), true);
+ preempt_enable();
+ for_each_rcu_flavor(rsp)
+ rcu_cleanup_dying_idle_cpu(cpu, rsp);
+ }
+ #endif
+
/*
* Handle CPU online/offline notification events.
*/