	int	(*cpu_mask_to_apicid)(const struct cpumask *cpumask,
				      struct irq_data *irqdata,
				      unsigned int *apicid);
+	u32	(*calc_dest_apicid)(unsigned int cpu);
	/* ICR related functions */
	u64	(*icr_read)(void);
extern int default_apic_id_valid(int apicid);
extern int default_acpi_madt_oem_check(char *, char *);
extern void default_setup_apic_routing(void);
+
+extern u32 apic_default_calc_apicid(unsigned int cpu);
+extern u32 apic_flat_calc_apicid(unsigned int cpu);
+
extern int flat_cpu_mask_to_apicid(const struct cpumask *cpumask,
				   struct irq_data *irqdata,
				   unsigned int *apicid);
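
The new callback computes the destination APIC ID from a single target CPU instead of deriving it from a cpumask plus irq_data. A minimal caller sketch (not part of the patch; the helper name is invented for illustration), assuming the usual global 'apic' driver pointer:

static u32 example_dest_apicid(unsigned int cpu)
{
	/* Ask the active APIC driver for this CPU's destination ID. */
	return apic->calc_dest_apicid(cpu);
}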
#include <linux/irq.h>
#include <asm/apic.h>
+/* Physical destination mode: the destination is the CPU's physical APIC ID. */
+u32 apic_default_calc_apicid(unsigned int cpu)
+{
+	return per_cpu(x86_cpu_to_apicid, cpu);
+}
+
int default_cpu_mask_to_apicid(const struct cpumask *msk, struct irq_data *irqd,
			       unsigned int *apicid)
{
	return 0;
}
+/* Flat logical mode: one bit per CPU in the 8-bit logical destination field. */
+u32 apic_flat_calc_apicid(unsigned int cpu)
+{
+	return 1U << cpu;
+}
+
int flat_cpu_mask_to_apicid(const struct cpumask *mask, struct irq_data *irqd,
			    unsigned int *apicid)
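
The two shared helpers cover the two classic destination modes: apic_default_calc_apicid() does a physical-mode lookup of the CPU's APIC ID, while apic_flat_calc_apicid() builds the flat logical-mode value with one bit per CPU in the 8-bit logical destination field. A made-up worked example (CPU number and APIC ID are invented): if CPU 3 has physical APIC ID 6, then

	apic_default_calc_apicid(3);	/* == 6, the physical APIC ID      */
	apic_flat_calc_apicid(3);	/* == 1U << 3 == 0x08, logical bit */

Because the flat value is a bit mask, flat logical mode only covers CPUs 0-7.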
.set_apic_id = set_apic_id,
.cpu_mask_to_apicid = flat_cpu_mask_to_apicid,
+ .calc_dest_apicid = apic_flat_calc_apicid,
.send_IPI = default_send_IPI_single,
.send_IPI_mask = flat_send_IPI_mask,
.set_apic_id = set_apic_id,
.cpu_mask_to_apicid = default_cpu_mask_to_apicid,
+ .calc_dest_apicid = apic_default_calc_apicid,
.send_IPI = default_send_IPI_single_phys,
.send_IPI_mask = default_send_IPI_mask_sequence_phys,
.set_apic_id = NULL,
.cpu_mask_to_apicid = flat_cpu_mask_to_apicid,
+ .calc_dest_apicid = apic_flat_calc_apicid,
.send_IPI = noop_send_IPI,
.send_IPI_mask = noop_send_IPI_mask,
.set_apic_id = numachip1_set_apic_id,
.cpu_mask_to_apicid = default_cpu_mask_to_apicid,
+ .calc_dest_apicid = apic_default_calc_apicid,
.send_IPI = numachip_send_IPI_one,
.send_IPI_mask = numachip_send_IPI_mask,
.set_apic_id = numachip2_set_apic_id,
.cpu_mask_to_apicid = default_cpu_mask_to_apicid,
+ .calc_dest_apicid = apic_default_calc_apicid,
.send_IPI = numachip_send_IPI_one,
.send_IPI_mask = numachip_send_IPI_mask,
.set_apic_id = NULL,
.cpu_mask_to_apicid = default_cpu_mask_to_apicid,
+ .calc_dest_apicid = apic_default_calc_apicid,
.send_IPI = default_send_IPI_single_phys,
.send_IPI_mask = default_send_IPI_mask_sequence_phys,
.set_apic_id = NULL,
.cpu_mask_to_apicid = flat_cpu_mask_to_apicid,
+ .calc_dest_apicid = apic_flat_calc_apicid,
.send_IPI = default_send_IPI_single,
.send_IPI_mask = default_send_IPI_mask_logical,
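
As the driver hunks above show, the APICs using physical destination mode wire up apic_default_calc_apicid(), while the ones using flat logical delivery reuse apic_flat_calc_apicid(); only x2APIC cluster mode and UV (below) need their own variants.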
	return 0;
}
+/* Cluster mode: the destination is the CPU's x2APIC logical ID. */
+static u32 x2apic_calc_apicid(unsigned int cpu)
+{
+	return per_cpu(x86_cpu_to_logical_apicid, cpu);
+}
+
static void init_x2apic_ldr(void)
{
	struct cluster_mask *cmsk = this_cpu_read(cluster_masks);
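
x2APIC cluster mode cannot reuse either shared helper: the destination is the CPU's 32-bit logical ID, which the driver caches in x86_cpu_to_logical_apicid. For reference, a hypothetical decoding sketch (not part of the patch; the function name is invented) of how such a logical ID is laid out in cluster mode, with the cluster number in bits 31:16 and a one-bit-per-CPU mask in bits 15:0:

static void example_decode_cluster_ldr(u32 ldr, u32 *cluster, u32 *cpu_bit)
{
	*cluster = ldr >> 16;		/* cluster number */
	*cpu_bit = ldr & 0xffff;	/* bit within the cluster */
}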
.set_apic_id = x2apic_set_apic_id,
.cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid,
+ .calc_dest_apicid = x2apic_calc_apicid,
.send_IPI = x2apic_send_IPI,
.send_IPI_mask = x2apic_send_IPI_mask,
.set_apic_id = x2apic_set_apic_id,
.cpu_mask_to_apicid = default_cpu_mask_to_apicid,
+ .calc_dest_apicid = apic_default_calc_apicid,
.send_IPI = x2apic_send_IPI,
.send_IPI_mask = x2apic_send_IPI_mask,
	return ret;
}
+/* UV: default physical APIC ID with the platform's high bits OR'ed in. */
+static u32 apic_uv_calc_apicid(unsigned int cpu)
+{
+	return apic_default_calc_apicid(cpu) | uv_apicid_hibits;
+}
+
static unsigned int x2apic_get_apic_id(unsigned long x)
{
	unsigned int id;
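
The UV variant builds on the default physical lookup and ORs in uv_apicid_hibits, the platform-provided upper APIC ID bits. A made-up worked example: with uv_apicid_hibits == 0x800000 and a CPU whose physical APIC ID is 0x12, apic_uv_calc_apicid() would return 0x800012.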
.set_apic_id = set_apic_id,
.cpu_mask_to_apicid = uv_cpu_mask_to_apicid,
+ .calc_dest_apicid = apic_uv_calc_apicid,
.send_IPI = uv_send_IPI_one,
.send_IPI_mask = uv_send_IPI_mask,
.set_apic_id = xen_set_apic_id, /* Can be NULL on 32-bit. */
.cpu_mask_to_apicid = flat_cpu_mask_to_apicid,
+ .calc_dest_apicid = apic_flat_calc_apicid,
#ifdef CONFIG_SMP
.send_IPI_mask = xen_send_IPI_mask,