#include <asm/nmi.h>
#include <asm/msidef.h>
#include <asm/hypertransport.h>
+#include <asm/setup.h>
#include <mach_apic.h>
#include <mach_apicdef.h>
static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
static DEFINE_SPINLOCK(ioapic_lock);
-static DEFINE_SPINLOCK(vector_lock);
+DEFINE_SPINLOCK(vector_lock);
int timer_through_8259 __initdata;
return vector;
}
-void setup_vector_irq(int cpu)
-{
-}
-
static struct irq_chip ioapic_chip;
#define IOAPIC_AUTO -1
void /*__init*/ print_local_APIC(void *dummy)
{
unsigned int v, ver, maxlvt;
+	u64 icr;
if (apic_verbosity == APIC_QUIET)
return;
smp_processor_id(), hard_smp_processor_id());
v = apic_read(APIC_ID);
printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v,
-	       GET_APIC_ID(read_apic_id()));
+	       GET_APIC_ID(v));
v = apic_read(APIC_LVR);
printk(KERN_INFO "... APIC VERSION: %08x\n", v);
ver = GET_APIC_VERSION(v);
printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
}
-	v = apic_read(APIC_ICR);
-	printk(KERN_DEBUG "... APIC ICR: %08x\n", v);
-	v = apic_read(APIC_ICR2);
-	printk(KERN_DEBUG "... APIC ICR2: %08x\n", v);
+	icr = apic_icr_read();
+	printk(KERN_DEBUG "... APIC ICR: %08x\n", (u32)icr);
+	printk(KERN_DEBUG "... APIC ICR2: %08x\n", (u32)(icr >> 32));
v = apic_read(APIC_LVTT);
printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);
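For context: apic_icr_read() hands back both halves of the interrupt command
register in one u64, which is why the two printks above can share a single
read. A sketch of the xAPIC-side helper declared later in this series (the
body is assumed, it is not part of this hunk):

	u64 xapic_icr_read(void)
	{
		u32 icr1, icr2;

		icr2 = apic_read(APIC_ICR2);	/* high half: destination field */
		icr1 = apic_read(APIC_ICR);	/* low half: delivery command */

		return icr1 | ((u64)icr2 << 32);
	}
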
entry.dest_mode = 0; /* Physical */
entry.delivery_mode = dest_ExtINT; /* ExtInt */
entry.vector = 0;
-	entry.dest.physical.physical_dest =
-			GET_APIC_ID(read_apic_id());
+	entry.dest.physical.physical_dest = read_apic_id();
/*
* Add it to the IO-APIC irq-routing table:
unsigned char old_id;
unsigned long flags;
-#ifdef CONFIG_X86_NUMAQ
-	if (found_numaq)
+	if (x86_quirks->setup_ioapic_ids && x86_quirks->setup_ioapic_ids())
 		return;
-#endif
/*
* Don't check I/O APIC IDs for xAPIC systems. They have
#endif
}
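A platform hook that owns the IO-APIC IDs simply returns nonzero from
setup_ioapic_ids(). A minimal NUMAQ-flavoured example (hypothetical helper
name; the x86_quirks table wiring is assumed, not shown here):

	static int numaq_setup_ioapic_ids(void)
	{
		/* firmware already programmed the IO-APIC IDs: skip the fixup */
		return 1;
	}
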
-#ifdef CONFIG_HAVE_CPUMASK_OF_CPU_MAP
-cpumask_t *cpumask_of_cpu_map __read_mostly;
-EXPORT_SYMBOL(cpumask_of_cpu_map);
-
-/* requires nr_cpu_ids to be initialized */
-static void __init setup_cpumask_of_cpu(void)
-{
- int i;
-
- /* alloc_bootmem zeroes memory */
- cpumask_of_cpu_map = alloc_bootmem_low(sizeof(cpumask_t) * nr_cpu_ids);
- for (i = 0; i < nr_cpu_ids; i++)
- cpu_set(i, cpumask_of_cpu_map[i]);
-}
-#else
-static inline void setup_cpumask_of_cpu(void) { }
-#endif
-
#ifdef CONFIG_X86_32
/*
* Great future not-so-futuristic plan: make i386 and x86_64 do it
printk(KERN_INFO
"cpu %d has no node %d or node-local memory\n",
cpu, node);
+		if (ptr)
+			printk(KERN_DEBUG "per cpu data for cpu%d at %016lx\n",
+			       cpu, __pa(ptr));
}
-	else
+	else {
ptr = alloc_bootmem_pages_node(NODE_DATA(node), size);
+		if (ptr)
+			printk(KERN_DEBUG "per cpu data for cpu%d on node%d at %016lx\n",
+			       cpu, node, __pa(ptr));
+	}
#endif
per_cpu_offset(cpu) = ptr - __per_cpu_start;
memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
/* Setup node to cpumask map */
setup_node_to_cpumask_map();
-
- /* Setup cpumask_of_cpu map */
- setup_cpumask_of_cpu();
}
#endif
#include <asm/proto.h>
#include <asm/ia32_unistd.h>
#include <asm/mce.h>
+#include <asm/syscalls.h>
#include "sigframe.h"
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
return do_sigaltstack(uss, uoss, regs->sp);
}
-	return restore_fpu_checking((__force struct i387_fxsave_struct *)buf);
+/*
+ * Signal frame handlers.
+ */
+
+static inline int save_i387(struct _fpstate __user *buf)
+{
+ struct task_struct *tsk = current;
+ int err = 0;
+
+ BUILD_BUG_ON(sizeof(struct user_i387_struct) !=
+ sizeof(tsk->thread.xstate->fxsave));
+
+ if ((unsigned long)buf % 16)
+ printk("save_i387: bad fpstate %p\n", buf);
+
+ if (!used_math())
+ return 0;
+ clear_used_math(); /* trigger finit */
+ if (task_thread_info(tsk)->status & TS_USEDFPU) {
+ err = save_i387_checking((struct i387_fxsave_struct __user *)
+ buf);
+ if (err)
+ return err;
+ task_thread_info(tsk)->status &= ~TS_USEDFPU;
+ stts();
+ } else {
+ if (__copy_to_user(buf, &tsk->thread.xstate->fxsave,
+ sizeof(struct i387_fxsave_struct)))
+ return -1;
+ }
+ return 1;
+}
+
+/*
+ * This restores directly out of user space. Exceptions are handled.
+ */
+static inline int restore_i387(struct _fpstate __user *buf)
+{
+ struct task_struct *tsk = current;
+ int err;
+
+ if (!used_math()) {
+ err = init_fpu(tsk);
+ if (err)
+ return err;
+ }
+
+ if (!(task_thread_info(current)->status & TS_USEDFPU)) {
+ clts();
+ task_thread_info(current)->status |= TS_USEDFPU;
+ }
+	err = restore_fpu_checking((__force struct i387_fxsave_struct *)buf);
+	if (unlikely(err)) {
+		/*
+		 * Encountered an error while doing the restore from the
+		 * user buffer, clear the fpu state.
+		 */
+		clear_fpu(tsk);
+		clear_used_math();
+	}
+	return err;
+}
/*
* Do a signal return; undo the signal stack.
#define get_idle_for_cpu(x) (per_cpu(idle_thread_array, x))
#define set_idle_for_cpu(x, p) (per_cpu(idle_thread_array, x) = (p))
#else
-struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ;
+static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ;
#define get_idle_for_cpu(x) (idle_thread_array[(x)])
#define set_idle_for_cpu(x, p) (idle_thread_array[(x)] = (p))
#endif
static atomic_t init_deasserted;
-static int boot_cpu_logical_apicid;
/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;
/* Set if we find a B stepping CPU */
-int __cpuinitdata smp_b_stepping;
+static int __cpuinitdata smp_b_stepping;
#if defined(CONFIG_NUMA) && defined(CONFIG_X86_32)
#endif
#ifdef CONFIG_X86_32
+static int boot_cpu_logical_apicid;
+
u8 cpu_2_logical_apicid[NR_CPUS] __read_mostly =
{ [0 ... NR_CPUS-1] = BAD_APICID };
/*
* (This works even if the APIC is not enabled.)
*/
-	phys_id = GET_APIC_ID(read_apic_id());
+	phys_id = read_apic_id();
cpuid = smp_processor_id();
if (cpu_isset(cpuid, cpu_callin_map)) {
panic("%s: phys CPU#%d, CPU#%d already present??\n", __func__,
* for which cpus receive the IPI. Holding this
* lock helps us to not include this cpu in a currently in progress
* smp_call_function().
+ *
+ * We need to hold vector_lock so that the set of online cpus
+ * does not change while we are assigning vectors to cpus.  Holding
+ * this lock ensures we don't half assign or remove an irq from a cpu.
*/
ipi_call_lock_irq();
-#ifdef CONFIG_X86_IO_APIC
-	setup_vector_irq(smp_processor_id());
-#endif
+	lock_vector_lock();
+	__setup_vector_irq(smp_processor_id());
cpu_set(smp_processor_id(), cpu_online_map);
+	unlock_vector_lock();
ipi_call_unlock_irq();
per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
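lock_vector_lock()/unlock_vector_lock() are exported by the io_apic code so
that smpboot never touches vector_lock directly; a minimal sketch consistent
with the declarations added to hw_irq.h below:

	void lock_vector_lock(void)
	{
		/* pin the online cpu set while vectors are being assigned */
		spin_lock(&vector_lock);
	}

	void unlock_vector_lock(void)
	{
		spin_unlock(&vector_lock);
	}
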
printk(KERN_CONT
"a previous APIC delivery may have failed\n");
-	apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(apicid));
-	apic_write(APIC_ICR, APIC_DM_REMRD | regs[i]);
+	apic_icr_write(APIC_DM_REMRD | regs[i], apicid);
timeout = 0;
do {
int maxlvt;
/* Target chip */
-	apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(logical_apicid));
-
/* Boot on the stack */
/* Kick the second */
-	apic_write(APIC_ICR, APIC_DM_NMI | APIC_DEST_LOGICAL);
+	apic_icr_write(APIC_DM_NMI | APIC_DEST_LOGICAL, logical_apicid);
pr_debug("Waiting for send to finish...\n");
send_status = safe_apic_wait_icr_idle();
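The ICR conversions in this file all funnel through apic_icr_write(low, id).
On xAPIC hardware the backing helper presumably keeps the classic
two-register sequence, along these lines (a sketch matching the removed
open-coded writes):

	void xapic_icr_write(u32 low, u32 id)
	{
		apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(id));
		apic_write(APIC_ICR, low);
	}
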
/*
* Turn INIT on target chip
*/
-	apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
-
/*
* Send IPI
*/
-	apic_write(APIC_ICR,
-		   APIC_INT_LEVELTRIG | APIC_INT_ASSERT | APIC_DM_INIT);
+	apic_icr_write(APIC_INT_LEVELTRIG | APIC_INT_ASSERT | APIC_DM_INIT,
+		       phys_apicid);
pr_debug("Waiting for send to finish...\n");
send_status = safe_apic_wait_icr_idle();
pr_debug("Deasserting INIT.\n");
/* Target chip */
-	apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
-
/* Send IPI */
-	apic_write(APIC_ICR, APIC_INT_LEVELTRIG | APIC_DM_INIT);
+	apic_icr_write(APIC_INT_LEVELTRIG | APIC_DM_INIT, phys_apicid);
pr_debug("Waiting for send to finish...\n");
send_status = safe_apic_wait_icr_idle();
*/
/* Target chip */
-	apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
-
/* Boot on the stack */
/* Kick the second */
-	apic_write(APIC_ICR, APIC_DM_STARTUP | (start_eip >> 12));
+	apic_icr_write(APIC_DM_STARTUP | (start_eip >> 12),
+		       phys_apicid);
/*
* Give the other CPU some time to accept the IPI.
flush_tlb_all();
low_mappings = 1;
+#ifdef CONFIG_X86_PC
+	if (def_to_bigsmp && apicid > 8) {
+		printk(KERN_WARNING
+			"More than 8 CPUs detected - skipping them.\n"
+			"Use CONFIG_X86_GENERICARCH and CONFIG_X86_BIGSMP.\n");
+		err = -1;
+	} else
+		err = do_boot_cpu(apicid, cpu);
+#else
err = do_boot_cpu(apicid, cpu);
+#endif
zap_low_mappings();
low_mappings = 0;
* Setup boot CPU information
*/
smp_store_cpu_info(0); /* Final full version of the data */
+#ifdef CONFIG_X86_32
boot_cpu_logical_apicid = logical_smp_processor_id();
+#endif
current_thread_info()->cpu = 0; /* needed? */
set_cpu_sibling_map(0);
+#ifdef CONFIG_X86_64
+	enable_IR_x2apic();
+	setup_apic_routing();
+#endif
+
if (smp_sanity_check(max_cpus) < 0) {
printk(KERN_INFO "SMP disabled\n");
disable_smp();
}
preempt_disable();
-	if (GET_APIC_ID(read_apic_id()) != boot_cpu_physical_apicid) {
+	if (read_apic_id() != boot_cpu_physical_apicid) {
panic("Boot APIC ID in local APIC unexpected (%d vs %d)",
-		      GET_APIC_ID(read_apic_id()), boot_cpu_physical_apicid);
+		      read_apic_id(), boot_cpu_physical_apicid);
/* Or can we switch back to PIC here? */
}
preempt_enable();
remove_siblinginfo(cpu);
/* It's now safe to remove this processor from the online map */
+	lock_vector_lock();
remove_cpu_from_maps(cpu);
+	unlock_vector_lock();
fixup_irqs(cpu_online_map);
return 0;
}
#include <asm/e820.h>
#include <asm/setup.h>
+#include <mach_ipi.h>
+
#ifdef CONFIG_HOTPLUG_CPU
#define DEFAULT_SEND_IPI (1)
#else
#define DEFAULT_SEND_IPI (0)
#endif
-int no_broadcast=DEFAULT_SEND_IPI;
+int no_broadcast = DEFAULT_SEND_IPI;
/**
* pre_intr_init_hook - initialisation prior to setting up interrupt vectors
init_ISA_irqs();
}
-/*
- * IRQ2 is cascade interrupt to second interrupt controller
- */
-static struct irqaction irq2 = {
-	.handler = no_action,
-	.mask = CPU_MASK_NONE,
-	.name = "cascade",
-};
-
/**
* intr_init_hook - post gate setup interrupt initialisation
*
if (x86_quirks->arch_intr_init())
return;
}
-#ifdef CONFIG_X86_LOCAL_APIC
-	apic_intr_init();
-#endif
-
-	if (!acpi_ioapic)
-		setup_irq(2, &irq2);
}
/**
-#ifndef _ASM_X86_APIC_H
-#define _ASM_X86_APIC_H
+#ifndef ASM_X86__APIC_H
+#define ASM_X86__APIC_H
#include <linux/pm.h>
#include <linux/delay.h>
#include <asm/apicdef.h>
#include <asm/processor.h>
#include <asm/system.h>
+#include <asm/cpufeature.h>
+#include <asm/msr.h>
#define ARCH_APICTIMER_STOPS_ON_C3 1
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
-#define apic_write native_apic_write
-#define apic_read native_apic_read
#define setup_boot_clock setup_boot_APIC_clock
#define setup_secondary_clock setup_secondary_APIC_clock
#endif
extern int is_vsmp_box(void);
+extern void xapic_wait_icr_idle(void);
+extern u32 safe_xapic_wait_icr_idle(void);
+extern u64 xapic_icr_read(void);
+extern void xapic_icr_write(u32, u32);
+extern int setup_profiling_timer(unsigned int);
-static inline void native_apic_write(unsigned long reg, u32 v)
+static inline void native_apic_mem_write(u32 reg, u32 v)
{
volatile u32 *addr = (volatile u32 *)(APIC_BASE + reg);
ASM_OUTPUT2("0" (v), "m" (*addr)));
}
-static inline u32 native_apic_read(unsigned long reg)
+static inline u32 native_apic_mem_read(u32 reg)
{
return *((volatile u32 *)(APIC_BASE + reg));
}
-extern void apic_wait_icr_idle(void);
-extern u32 safe_apic_wait_icr_idle(void);
+static inline void native_apic_msr_write(u32 reg, u32 v)
+{
+	/*
+	 * DFR does not exist in x2APIC mode; ID, LDR and LVR are
+	 * read-only there, so silently drop writes to them.
+	 */
+	if (reg == APIC_DFR || reg == APIC_ID || reg == APIC_LDR ||
+	    reg == APIC_LVR)
+		return;
+
+	wrmsr(APIC_BASE_MSR + (reg >> 4), v, 0);
+}
+
+static inline u32 native_apic_msr_read(u32 reg)
+{
+	u32 low, high;
+
+	/* DFR has no MSR equivalent in x2APIC mode */
+	if (reg == APIC_DFR)
+		return -1;
+
+	rdmsr(APIC_BASE_MSR + (reg >> 4), low, high);
+	return low;
+}
+
+#ifndef CONFIG_X86_32
+extern int x2apic, x2apic_preenabled;
+extern void check_x2apic(void);
+extern void enable_x2apic(void);
+extern void enable_IR_x2apic(void);
+extern void x2apic_icr_write(u32 low, u32 id);
+#endif
+
+struct apic_ops {
+	u32 (*read)(u32 reg);
+	void (*write)(u32 reg, u32 v);
+	u64 (*icr_read)(void);
+	void (*icr_write)(u32 low, u32 high);
+	void (*wait_icr_idle)(void);
+	u32 (*safe_wait_icr_idle)(void);
+};
+
+extern struct apic_ops *apic_ops;
+
+#define apic_read		(apic_ops->read)
+#define apic_write		(apic_ops->write)
+#define apic_icr_read		(apic_ops->icr_read)
+#define apic_icr_write		(apic_ops->icr_write)
+#define apic_wait_icr_idle	(apic_ops->wait_icr_idle)
+#define safe_apic_wait_icr_idle	(apic_ops->safe_wait_icr_idle)
+
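Each APIC flavour fills this table once at boot; a sketch of the default
xAPIC instance, built from the helpers declared above:

	static struct apic_ops xapic_ops = {
		.read			= native_apic_mem_read,
		.write			= native_apic_mem_write,
		.icr_read		= xapic_icr_read,
		.icr_write		= xapic_icr_write,
		.wait_icr_idle		= xapic_wait_icr_idle,
		.safe_wait_icr_idle	= safe_xapic_wait_icr_idle,
	};

	struct apic_ops *apic_ops = &xapic_ops;

Switching to x2APIC then amounts to repointing apic_ops at an MSR-based
table instead of patching every call site.
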
extern int get_physical_broadcast(void);
+#ifdef CONFIG_X86_64
+static inline void ack_x2APIC_irq(void)
+{
+	/* Docs say use 0 for future compatibility */
+	native_apic_msr_write(APIC_EOI, 0);
+}
+#endif
+
static inline void ack_APIC_irq(void)
{
/*
#endif /* !CONFIG_X86_LOCAL_APIC */
-#endif /* __ASM_APIC_H */
+#endif /* ASM_X86__APIC_H */
-#ifndef __ASM_E820_H
-#define __ASM_E820_H
+#ifndef ASM_X86__E820_H
+#define ASM_X86__E820_H
#define E820MAP 0x2d0 /* our map */
#define E820MAX 128 /* number of entries in E820MAP */
extern struct e820map e820;
extern struct e820map e820_saved;
+extern unsigned long pci_mem_start;
extern int e820_any_mapped(u64 start, u64 end, unsigned type);
extern int e820_all_mapped(u64 start, u64 end, unsigned type);
extern void e820_add_region(u64 start, u64 size, int type);
#define HIGH_MEMORY (1024*1024)
#endif /* __KERNEL__ */
-#endif /* __ASM_E820_H */
+#endif /* ASM_X86__E820_H */
-#ifndef _ASM_HW_IRQ_H
-#define _ASM_HW_IRQ_H
+#ifndef ASM_X86__HW_IRQ_H
+#define ASM_X86__HW_IRQ_H
/*
* (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
#endif
/* IPI functions */
+#ifdef CONFIG_X86_32
extern void send_IPI_self(int vector);
+#endif
extern void send_IPI(int dest, int vector);
/* Statistics */
extern asmlinkage void qic_enable_irq_interrupt(void);
extern asmlinkage void qic_call_function_interrupt(void);
+/* SMP */
+extern void smp_apic_timer_interrupt(struct pt_regs *);
+#ifdef CONFIG_X86_32
+extern void smp_spurious_interrupt(struct pt_regs *);
+extern void smp_error_interrupt(struct pt_regs *);
+#else
+extern asmlinkage void smp_spurious_interrupt(void);
+extern asmlinkage void smp_error_interrupt(void);
+#endif
+#ifdef CONFIG_X86_SMP
+extern void smp_reschedule_interrupt(struct pt_regs *);
+extern void smp_call_function_interrupt(struct pt_regs *);
+extern void smp_call_function_single_interrupt(struct pt_regs *);
+#ifdef CONFIG_X86_32
+extern void smp_invalidate_interrupt(struct pt_regs *);
+#else
+extern asmlinkage void smp_invalidate_interrupt(struct pt_regs *);
+#endif
+#endif
+
#ifdef CONFIG_X86_32
extern void (*const interrupt[NR_IRQS])(void);
#else
typedef int vector_irq_t[NR_VECTORS];
DECLARE_PER_CPU(vector_irq_t, vector_irq);
-extern spinlock_t vector_lock;
#endif
-extern void setup_vector_irq(int cpu);
+
+#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_X86_64)
+extern void lock_vector_lock(void);
+extern void unlock_vector_lock(void);
+extern void __setup_vector_irq(int cpu);
+#else
+static inline void lock_vector_lock(void) {}
+static inline void unlock_vector_lock(void) {}
+static inline void __setup_vector_irq(int cpu) {}
+#endif
#endif /* !ASSEMBLY_ */
-#endif
+#endif /* ASM_X86__HW_IRQ_H */
* x86-64 work by Andi Kleen 2002
*/
-#ifndef _ASM_X86_I387_H
-#define _ASM_X86_I387_H
+#ifndef ASM_X86__I387_H
+#define ASM_X86__I387_H
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/regset.h>
+#include <linux/hardirq.h>
#include <asm/asm.h>
#include <asm/processor.h>
#include <asm/sigcontext.h>
extern int init_fpu(struct task_struct *child);
extern asmlinkage void math_state_restore(void);
extern void init_thread_xstate(void);
+extern int dump_fpu(struct pt_regs *, struct user_i387_struct *);
extern user_regset_active_fn fpregs_active, xfpregs_active;
extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get;
#else
: [fx] "cdaSDb" (fx), "m" (*fx), "0" (0));
#endif
-	if (unlikely(err))
-		init_fpu(current);
return err;
}
task_thread_info(tsk)->status &= ~TS_USEDFPU;
}
-/*
- * Signal frame handlers.
- */
-
-static inline int save_i387(struct _fpstate __user *buf)
-{
- struct task_struct *tsk = current;
- int err = 0;
-
- BUILD_BUG_ON(sizeof(struct user_i387_struct) !=
- sizeof(tsk->thread.xstate->fxsave));
-
- if ((unsigned long)buf % 16)
- printk("save_i387: bad fpstate %p\n", buf);
-
- if (!used_math())
- return 0;
- clear_used_math(); /* trigger finit */
- if (task_thread_info(tsk)->status & TS_USEDFPU) {
- err = save_i387_checking((struct i387_fxsave_struct __user *)
- buf);
- if (err)
- return err;
- task_thread_info(tsk)->status &= ~TS_USEDFPU;
- stts();
- } else {
- if (__copy_to_user(buf, &tsk->thread.xstate->fxsave,
- sizeof(struct i387_fxsave_struct)))
- return -1;
- }
- return 1;
-}
-
-/*
- * This restores directly out of user space. Exceptions are handled.
- */
-static inline int restore_i387(struct _fpstate __user *buf)
-{
- struct task_struct *tsk = current;
- int err;
-
- if (!used_math()) {
- err = init_fpu(tsk);
- if (err)
- return err;
- }
-
- if (!(task_thread_info(current)->status & TS_USEDFPU)) {
- clts();
- task_thread_info(current)->status |= TS_USEDFPU;
- }
- return restore_fpu_checking((__force struct i387_fxsave_struct *)buf);
-}
-
#else /* CONFIG_X86_32 */
extern void finit(void);
preempt_enable();
}
+/*
+ * Some instructions like VIA's padlock instructions generate a spurious
+ * DNA fault but don't modify SSE registers. And these instructions
+ * get used from interrupt context as well. To prevent these kernel
+ * instructions in interrupt context from interacting wrongly with other
+ * user/kernel fpu usage, we should use them only in the context of
+ * irq_ts_save/restore()
+ */
+static inline int irq_ts_save(void)
+{
+	/*
+	 * If we are in process context, we are ok to take a spurious DNA fault.
+	 * Otherwise, doing clts() in process context requires pre-emption to
+	 * be disabled or some heavy lifting like kernel_fpu_begin()
+	 */
+	if (!in_interrupt())
+		return 0;
+
+	if (read_cr0() & X86_CR0_TS) {
+		clts();
+		return 1;
+	}
+
+	return 0;
+}
+
+static inline void irq_ts_restore(int TS_state)
+{
+	if (TS_state)
+		stts();
+}
+
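A typical caller wraps the faulting instruction like this (usage sketch; the
VIA PadLock crypto drivers are the intended users):

	int ts;

	ts = irq_ts_save();
	/* execute the PadLock instruction that may raise a spurious DNA fault */
	irq_ts_restore(ts);
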
#ifdef CONFIG_X86_64
static inline void save_init_fpu(struct task_struct *tsk)
}
}
-#endif /* _ASM_X86_I387_H */
+#endif /* ASM_X86__I387_H */
-#ifndef _ASM_X86_IO_H
-#define _ASM_X86_IO_H
+#ifndef ASM_X86__IO_H
+#define ASM_X86__IO_H
#define ARCH_HAS_IOREMAP_WC
#define writeq writeq
#endif
+extern int iommu_bio_merge;
+
#ifdef CONFIG_X86_32
# include "io_32.h"
#else
extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);
-#endif /* _ASM_X86_IO_H */
+#endif /* ASM_X86__IO_H */
-#ifndef _ASM_IO_H
-#define _ASM_IO_H
+#ifndef ASM_X86__IO_64_H
+#define ASM_X86__IO_64_H
/*
*/
extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
+extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size,
+ unsigned long prot_val);
/*
* The default ioremap() behavior is non-cached:
#define flush_write_buffers()
-extern int iommu_bio_merge;
#define BIO_VMERGE_BOUNDARY iommu_bio_merge
/*
#endif /* __KERNEL__ */
-#endif
+#endif /* ASM_X86__IO_64_H */
-#ifndef _ASM_X86_PAGE_32_H
-#define _ASM_X86_PAGE_32_H
+#ifndef ASM_X86__PAGE_32_H
+#define ASM_X86__PAGE_32_H
/*
* This handles the memory map.
extern unsigned long init_memory_mapping(unsigned long start,
unsigned long end);
extern void initmem_init(unsigned long, unsigned long);
+extern void free_initmem(void);
extern void setup_bootmem_allocator(void);
#endif /* CONFIG_X86_3DNOW */
#endif /* !__ASSEMBLY__ */
-#endif /* _ASM_X86_PAGE_32_H */
+#endif /* ASM_X86__PAGE_32_H */
-#ifndef _X86_64_PAGE_H
-#define _X86_64_PAGE_H
+#ifndef ASM_X86__PAGE_64_H
+#define ASM_X86__PAGE_64_H
#define PAGETABLE_LEVELS 4
unsigned long end);
extern void initmem_init(unsigned long start_pfn, unsigned long end_pfn);
+extern void free_initmem(void);
extern void init_extra_mapping_uc(unsigned long phys, unsigned long size);
extern void init_extra_mapping_wb(unsigned long phys, unsigned long size);
#endif
-#endif /* _X86_64_PAGE_H */
+#endif /* ASM_X86__PAGE_64_H */
-#ifndef _ASM_X86_PGTABLE_H
-#define _ASM_X86_PGTABLE_H
+#ifndef ASM_X86__PGTABLE_H
+#define ASM_X86__PGTABLE_H
#define FIRST_USER_ADDRESS 0
#define _PAGE_BIT_UNUSED2 10
#define _PAGE_BIT_UNUSED3 11
#define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
+#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
#define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
#define _PAGE_PRESENT (_AT(pteval_t, 1) << _PAGE_BIT_PRESENT)
#define _PAGE_UNUSED3 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED3)
#define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
#define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
+#define _PAGE_SPECIAL (_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL)
+#define __HAVE_ARCH_PTE_SPECIAL
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \
- _PAGE_ACCESSED | _PAGE_DIRTY)
+ _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_CACHE_MASK (_PAGE_PCD | _PAGE_PWT)
#define _PAGE_CACHE_WB (0)
static inline int pte_special(pte_t pte)
{
- return 0;
+ return pte_val(pte) & _PAGE_SPECIAL;
}
static inline int pmd_large(pmd_t pte)
static inline pte_t pte_mkspecial(pte_t pte)
{
- return pte;
+ return __pte(pte_val(pte) | _PAGE_SPECIAL);
}
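A quick illustration of the now-functional special bit (hypothetical
snippet; get_user_pages()/vm_normal_page() are the real consumers, and pfn
and prot are assumed to come from the caller):

	pte_t pte = pfn_pte(pfn, prot);

	pte = pte_mkspecial(pte);	/* no struct page to refcount */
	BUG_ON(!pte_special(pte));
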
extern pteval_t __supported_pte_mask;
static inline void native_pagetable_setup_done(pgd_t *base) {}
#endif
+extern int arch_report_meminfo(char *page);
+
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else /* !CONFIG_PARAVIRT */
#include <asm-generic/pgtable.h>
#endif /* __ASSEMBLY__ */
-#endif /* _ASM_X86_PGTABLE_H */
+#endif /* ASM_X86__PGTABLE_H */
-#ifndef _I386_PGTABLE_H
-#define _I386_PGTABLE_H
+#ifndef ASM_X86__PGTABLE_32_H
+#define ASM_X86__PGTABLE_32_H
/*
static inline void check_pgt_cache(void) { }
void paging_init(void);
+extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
/*
* The Linux x86 paging architecture is 'compile-time dual-mode', it
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
remap_pfn_range(vma, vaddr, pfn, size, prot)
-#endif /* _I386_PGTABLE_H */
+#endif /* ASM_X86__PGTABLE_32_H */
-#ifndef __ASM_X86_PROCESSOR_H
-#define __ASM_X86_PROCESSOR_H
+#ifndef ASM_X86__PROCESSOR_H
+#define ASM_X86__PROCESSOR_H
#include <asm/processor-flags.h>
#define current_cpu_data boot_cpu_data
#endif
+extern const struct seq_operations cpuinfo_op;
+
static inline int hlt_works(int cpu)
{
#ifdef CONFIG_X86_32
extern void cpu_detect(struct cpuinfo_x86 *c);
+extern struct pt_regs *idle_regs(struct pt_regs *);
+
extern void early_cpu_init(void);
extern void identify_boot_cpu(void);
extern void identify_secondary_cpu(struct cpuinfo_x86 *);
extern int get_tsc_mode(unsigned long adr);
extern int set_tsc_mode(unsigned int val);
-#endif
+#endif /* ASM_X86__PROCESSOR_H */
-#ifndef _ASM_X86_PTRACE_H
-#define _ASM_X86_PTRACE_H
+#ifndef ASM_X86__PTRACE_H
+#define ASM_X86__PTRACE_H
#include <linux/compiler.h> /* For __user */
#include <asm/ptrace-abi.h>
void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
#endif
+extern long syscall_trace_enter(struct pt_regs *);
+extern void syscall_trace_leave(struct pt_regs *);
+
static inline unsigned long regs_return_value(struct pt_regs *regs)
{
return regs->ax;
#endif /* !__ASSEMBLY__ */
-#endif
+#endif /* ASM_X86__PTRACE_H */
-#ifndef _ASM_X86_SETUP_H
-#define _ASM_X86_SETUP_H
+#ifndef ASM_X86__SETUP_H
+#define ASM_X86__SETUP_H
#define COMMAND_LINE_SIZE 2048
void (*mpc_oem_pci_bus)(struct mpc_config_bus *m);
void (*smp_read_mpc_oem)(struct mp_config_oemtable *oemtable,
unsigned short oemsize);
+	int (*setup_ioapic_ids)(void);
};
extern struct x86_quirks *x86_quirks;
+extern unsigned long saved_video_mode;
#ifndef CONFIG_PARAVIRT
#define paravirt_post_allocator_init() do {} while (0)
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
-#endif /* _ASM_X86_SETUP_H */
+#endif /* ASM_X86__SETUP_H */
-#ifndef _ASM_X86_SIGNAL_H
-#define _ASM_X86_SIGNAL_H
+#ifndef ASM_X86__SIGNAL_H
+#define ASM_X86__SIGNAL_H
#ifndef __ASSEMBLY__
#include <linux/types.h>
struct k_sigaction {
struct sigaction sa;
};
+
+extern void do_notify_resume(struct pt_regs *, void *, __u32);
+
# else /* __KERNEL__ */
/* Here we must cater to libcs that poke about in kernel headers. */
#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
-#endif
+#endif /* ASM_X86__SIGNAL_H */
-#ifndef _ASM_X86_SMP_H_
-#define _ASM_X86_SMP_H_
+#ifndef ASM_X86__SMP_H
+#define ASM_X86__SMP_H
#ifndef __ASSEMBLY__
#include <linux/cpumask.h>
#include <linux/init.h>
DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
DECLARE_PER_CPU(cpumask_t, cpu_core_map);
DECLARE_PER_CPU(u16, cpu_llc_id);
+#ifdef CONFIG_X86_32
+DECLARE_PER_CPU(int, cpu_number);
+#endif
DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid);
DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
* from the initial startup. We map APIC_BASE very early in page_setup(),
* so this is correct in the x86 case.
*/
-DECLARE_PER_CPU(int, cpu_number);
#define raw_smp_processor_id() (x86_read_percpu(cpu_number))
extern int safe_smp_processor_id(void);
#ifdef CONFIG_X86_LOCAL_APIC
+#ifndef CONFIG_X86_64
static inline int logical_smp_processor_id(void)
{
/* we don't want to mark this access volatile - bad code generation */
return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR));
}
-#ifndef CONFIG_X86_64
+#include <mach_apicdef.h>
static inline unsigned int read_apic_id(void)
{
-	return *(u32 *)(APIC_BASE + APIC_ID);
+	unsigned int reg;
+
+	reg = *(u32 *)(APIC_BASE + APIC_ID);
+
+	return GET_APIC_ID(reg);
}
-#else
-extern unsigned int read_apic_id(void);
#endif
-# ifdef APIC_DEFINITION
+# if defined(APIC_DEFINITION) || defined(CONFIG_X86_64)
extern int hard_smp_processor_id(void);
# else
-# include <mach_apicdef.h>
+#include <mach_apicdef.h>
static inline int hard_smp_processor_id(void)
{
/* we don't want to mark this access volatile - bad code generation */
-	return GET_APIC_ID(read_apic_id());
+	return read_apic_id();
}
# endif /* APIC_DEFINITION */
#endif
#endif /* __ASSEMBLY__ */
-#endif
+#endif /* ASM_X86__SMP_H */
-#ifndef _ASMX86_TIME_H
-#define _ASMX86_TIME_H
+#ifndef ASM_X86__TIME_H
+#define ASM_X86__TIME_H
extern void hpet_time_init(void);
#endif
+extern void time_init(void);
+
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else /* !CONFIG_PARAVIRT */
extern unsigned long __init calibrate_cpu(void);
-#endif
+#endif /* ASM_X86__TIME_H */
-#ifndef _ASMi386_TIMER_H
-#define _ASMi386_TIMER_H
+#ifndef ASM_X86__TIMER_H
+#define ASM_X86__TIMER_H
#include <linux/init.h>
#include <linux/pm.h>
#include <linux/percpu.h>
unsigned long long native_sched_clock(void);
unsigned long native_calibrate_tsc(void);
+#ifdef CONFIG_X86_32
extern int timer_ack;
-extern int no_timer_check;
extern int recalibrate_cpu_khz(void);
+#endif /* CONFIG_X86_32 */
+
+extern int no_timer_check;
#ifndef CONFIG_PARAVIRT
#define calibrate_tsc() native_calibrate_tsc()
return ns;
}
-#endif
+#endif /* ASM_X86__TIMER_H */
-#ifndef _ASM_X86_TRAPS_H
-#define _ASM_X86_TRAPS_H
+#ifndef ASM_X86__TRAPS_H
+#define ASM_X86__TRAPS_H
/* Common in X86_32 and X86_64 */
asmlinkage void divide_error(void);
unsigned long patch_espfix_desc(unsigned long, unsigned long);
asmlinkage void math_emulate(long);
+void do_page_fault(struct pt_regs *regs, unsigned long error_code);
+
#else /* CONFIG_X86_32 */
asmlinkage void double_fault(void);
asmlinkage void do_simd_coprocessor_error(struct pt_regs *);
asmlinkage void do_spurious_interrupt_bug(struct pt_regs *);
+asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code);
+
#endif /* CONFIG_X86_32 */
-#endif /* _ASM_X86_TRAPS_H */
+#endif /* ASM_X86__TRAPS_H */