panic(bug64hit, !R4000_WAR ? r4kwar : nowar);
}
-static volatile int daddi_ov __initdata = 0;
+static volatile int daddi_ov __cpuinitdata = 0;
asmlinkage void __init do_daddi_ov(struct pt_regs *regs)
{
panic(bug64hit, !DADDI_WAR ? daddiwar : nowar);
}
-int daddiu_bug __initdata = -1;
+int daddiu_bug __cpuinitdata = -1;
static inline void check_daddiu(void)
{
}
}
-static char unknown_isa[] __initdata = KERN_ERR \
+static char unknown_isa[] __cpuinitdata = KERN_ERR \
"Unsupported ISA type, c0.config0: %d.";
static inline unsigned int decode_config0(struct cpuinfo_mips *c)
return config3 & MIPS_CONF_M;
}
-static void __init decode_configs(struct cpuinfo_mips *c)
+static void __cpuinit decode_configs(struct cpuinfo_mips *c)
{
/* MIPS32 or MIPS64 compliant CPU. */
c->options = MIPS_CPU_4KEX | MIPS_CPU_4K_CACHE | MIPS_CPU_COUNTER |
/*
* Name a CPU
*/
-static __init const char *cpu_to_name(struct cpuinfo_mips *c)
+static __cpuinit const char *cpu_to_name(struct cpuinfo_mips *c)
{
const char *name = NULL;
return name;
}
-__init void cpu_probe(void)
+__cpuinit void cpu_probe(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
unsigned int cpu = smp_processor_id();
c->srsets = 1;
}
-__init void cpu_report(void)
+__cpuinit void cpu_report(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
j start_kernel
END(kernel_entry)
- __INIT
+ __CPUINIT
#ifdef CONFIG_SMP
/*
int cp0_perfcount_irq;
EXPORT_SYMBOL_GPL(cp0_perfcount_irq);
-void __init per_cpu_trap_init(void)
+void __cpuinit per_cpu_trap_init(void)
{
unsigned int cpu = smp_processor_id();
unsigned int status_set = ST0_CU0;
flush_icache_range(ebase + offset, ebase + offset + size);
}
-static char panic_null_cerr[] __initdata =
+static char panic_null_cerr[] __cpuinitdata =
"Trying to set NULL cache error exception handler";
/* Install uncached CPU exception handler */
-void __init set_uncached_handler(unsigned long offset, void *addr, unsigned long size)
+void __cpuinit set_uncached_handler(unsigned long offset, void *addr,
+ unsigned long size)
{
#ifdef CONFIG_32BIT
unsigned long uncached_ebase = KSEG1ADDR(ebase);
* values, so we can avoid sharing the same stack area between a cached
* and the uncached mode.
*/
-unsigned long __init run_uncached(void *func)
+unsigned long __cpuinit run_uncached(void *func)
{
register long sp __asm__("$sp");
register long ret __asm__("$2");
}
}
-unsigned int __init get_c0_compare_int(void)
+unsigned int __cpuinit get_c0_compare_int(void)
{
#ifdef MSC01E_INT_BASE
if (cpu_has_veic) {
}
-unsigned __init get_c0_compare_int(void)
+unsigned __cpuinit get_c0_compare_int(void)
{
#ifdef MSC01E_INT_BASE
if (cpu_has_veic) {
r3k_flush_dcache_range(start, start + size);
}
-void __init r3k_cache_init(void)
+void __cpuinit r3k_cache_init(void)
{
extern void build_clear_page(void);
extern void build_copy_page(void);
blast_dcache32_page(addr);
}
-static void __init r4k_blast_dcache_page_setup(void)
+static void __cpuinit r4k_blast_dcache_page_setup(void)
{
unsigned long dc_lsize = cpu_dcache_line_size();
static void (* r4k_blast_dcache_page_indexed)(unsigned long addr);
-static void __init r4k_blast_dcache_page_indexed_setup(void)
+static void __cpuinit r4k_blast_dcache_page_indexed_setup(void)
{
unsigned long dc_lsize = cpu_dcache_line_size();
static void (* r4k_blast_dcache)(void);
-static void __init r4k_blast_dcache_setup(void)
+static void __cpuinit r4k_blast_dcache_setup(void)
{
unsigned long dc_lsize = cpu_dcache_line_size();
static void (* r4k_blast_icache_page)(unsigned long addr);
-static void __init r4k_blast_icache_page_setup(void)
+static void __cpuinit r4k_blast_icache_page_setup(void)
{
unsigned long ic_lsize = cpu_icache_line_size();
static void (* r4k_blast_icache_page_indexed)(unsigned long addr);
-static void __init r4k_blast_icache_page_indexed_setup(void)
+static void __cpuinit r4k_blast_icache_page_indexed_setup(void)
{
unsigned long ic_lsize = cpu_icache_line_size();
static void (* r4k_blast_icache)(void);
-static void __init r4k_blast_icache_setup(void)
+static void __cpuinit r4k_blast_icache_setup(void)
{
unsigned long ic_lsize = cpu_icache_line_size();
static void (* r4k_blast_scache_page)(unsigned long addr);
-static void __init r4k_blast_scache_page_setup(void)
+static void __cpuinit r4k_blast_scache_page_setup(void)
{
unsigned long sc_lsize = cpu_scache_line_size();
static void (* r4k_blast_scache_page_indexed)(unsigned long addr);
-static void __init r4k_blast_scache_page_indexed_setup(void)
+static void __cpuinit r4k_blast_scache_page_indexed_setup(void)
{
unsigned long sc_lsize = cpu_scache_line_size();
static void (* r4k_blast_scache)(void);
-static void __init r4k_blast_scache_setup(void)
+static void __cpuinit r4k_blast_scache_setup(void)
{
unsigned long sc_lsize = cpu_scache_line_size();
}
}
-static char *way_string[] __initdata = { NULL, "direct mapped", "2-way",
+static char *way_string[] __cpuinitdata = { NULL, "direct mapped", "2-way",
"3-way", "4-way", "5-way", "6-way", "7-way", "8-way"
};
-static void __init probe_pcache(void)
+static void __cpuinit probe_pcache(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
unsigned int config = read_c0_config();
* executes in KSEG1 space or else you will crash and burn badly. You have
* been warned.
*/
-static int __init probe_scache(void)
+static int __cpuinit probe_scache(void)
{
unsigned long flags, addr, begin, end, pow2;
unsigned int config = read_c0_config();
extern int rm7k_sc_init(void);
extern int mips_sc_init(void);
-static void __init setup_scache(void)
+static void __cpuinit setup_scache(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
unsigned int config = read_c0_config();
}
}
-static void __init coherency_setup(void)
+static void __cpuinit coherency_setup(void)
{
change_c0_config(CONF_CM_CMASK, CONF_CM_DEFAULT);
}
}
-void __init r4k_cache_init(void)
+void __cpuinit r4k_cache_init(void)
{
extern void build_clear_page(void);
extern void build_copy_page(void);
}
}
-void __init tx39_cache_init(void)
+void __cpuinit tx39_cache_init(void)
{
extern void build_clear_page(void);
extern void build_copy_page(void);
}
}
-static char cache_panic[] __initdata = "Yeee, unsupported cache architecture.";
+static char cache_panic[] __cpuinitdata =
+ "Yeee, unsupported cache architecture.";
-void __init cpu_cache_init(void)
+void __devinit cpu_cache_init(void)
{
if (cpu_has_3k_cache) {
extern void __weak r3k_cache_init(void);
* is changed.
*/
- __INIT
-
.set mips64
.set noreorder
.set noat
* (0x170-0x17f) are used to preserve k0, k1, and ra.
*/
+ __CPUINIT
+
LEAF(except_vec2_sb1)
/*
* If this error is recoverable, we need to exit the handler
* with 64-bit kernels. The prefetch offsets have been experimentally tuned
* an Origin 200.
*/
-static int pref_offset_clear __initdata = 512;
-static int pref_offset_copy __initdata = 256;
+static int pref_offset_clear __cpuinitdata = 512;
+static int pref_offset_copy __cpuinitdata = 256;
-static unsigned int pref_src_mode __initdata;
-static unsigned int pref_dst_mode __initdata;
+static unsigned int pref_src_mode __cpuinitdata;
+static unsigned int pref_dst_mode __cpuinitdata;
-static int load_offset __initdata;
-static int store_offset __initdata;
+static int load_offset __cpuinitdata;
+static int store_offset __cpuinitdata;
-static unsigned int __initdata *dest, *epc;
+static unsigned int __cpuinitdata *dest, *epc;
static unsigned int instruction_pending;
static union mips_instruction delayed_mi;
-static void __init emit_instruction(union mips_instruction mi)
+static void __cpuinit emit_instruction(union mips_instruction mi)
{
if (instruction_pending)
*epc++ = delayed_mi.word;
emit_instruction(mi);
}
-static void __init __build_store_reg(int reg)
+static void __cpuinit __build_store_reg(int reg)
{
union mips_instruction mi;
unsigned int width;
flush_delay_slot_or_nop();
}
-void __init build_clear_page(void)
+void __cpuinit build_clear_page(void)
{
unsigned int loop_start;
unsigned long off;
pr_debug("\t.set pop\n");
}
-void __init build_copy_page(void)
+void __cpuinit build_copy_page(void)
{
unsigned int loop_start;
unsigned long off;
EXPORT_SYMBOL(clear_page);
EXPORT_SYMBOL(copy_page);
-void __init build_clear_page(void)
+void __cpuinit build_clear_page(void)
{
}
-void __init build_copy_page(void)
+void __cpuinit build_copy_page(void)
{
}
.bc_inv = indy_sc_wback_invalidate
};
-void __init indy_sc_init(void)
+void __cpuinit indy_sc_init(void)
{
if (indy_sc_probe()) {
indy_sc_enable();
return 1;
}
-int __init mips_sc_init(void)
+int __cpuinit mips_sc_init(void)
{
int found = mips_sc_probe();
if (found) {
}
return found;
}
-
.bc_inv = r5k_dma_cache_inv_sc
};
-void __init r5k_sc_init(void)
+void __cpuinit r5k_sc_init(void)
{
if (r5k_sc_probe()) {
r5k_sc_enable();
.bc_inv = rm7k_sc_inv
};
-void __init rm7k_sc_init(void)
+void __cpuinit rm7k_sc_init(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
unsigned int config = read_c0_config();
}
}
-void __init tlb_init(void)
+void __cpuinit tlb_init(void)
{
local_flush_tlb_all();
* lifetime of the system
*/
-static int temp_tlb_entry __initdata;
+static int temp_tlb_entry __cpuinitdata;
__init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
unsigned long entryhi, unsigned long pagemask)
return ret;
}
-static void __init probe_tlb(unsigned long config)
+static void __cpuinit probe_tlb(unsigned long config)
{
struct cpuinfo_mips *c = &current_cpu_data;
unsigned int reg;
c->tlbsize = ((reg >> 25) & 0x3f) + 1;
}
-static int __initdata ntlb = 0;
+static int __cpuinitdata ntlb = 0;
static int __init set_ntlb(char *str)
{
get_option(&str, &ntlb);
__setup("ntlb=", set_ntlb);
-void __init tlb_init(void)
+void __cpuinit tlb_init(void)
{
unsigned int config = read_c0_config();
local_irq_restore(flags);
}
-static void __init probe_tlb(unsigned long config)
+static void __cpuinit probe_tlb(unsigned long config)
{
struct cpuinfo_mips *c = &current_cpu_data;
c->tlbsize = 3 * 128; /* 3 sets each 128 entries */
}
-void __init tlb_init(void)
+void __cpuinit tlb_init(void)
{
unsigned int config = read_c0_config();
unsigned long status;
* why; it's not an issue caused by the core RTL.
*
*/
-static int __init m4kc_tlbp_war(void)
+static int __cpuinit m4kc_tlbp_war(void)
{
return (current_cpu_data.processor_id & 0xffff00) ==
(PRID_COMP_MIPS | PRID_IMP_4KC);
* We deliberately chose a buffer size of 128, so we won't scribble
* over anything important on overflow before we panic.
*/
-static u32 tlb_handler[128] __initdata;
+static u32 tlb_handler[128] __cpuinitdata;
/* simply assume worst case size for labels and relocs */
-static struct uasm_label labels[128] __initdata;
-static struct uasm_reloc relocs[128] __initdata;
+static struct uasm_label labels[128] __cpuinitdata;
+static struct uasm_reloc relocs[128] __cpuinitdata;
/*
* The R3000 TLB handler is simple.
*/
-static void __init build_r3000_tlb_refill_handler(void)
+static void __cpuinit build_r3000_tlb_refill_handler(void)
{
long pgdc = (long)pgd_current;
u32 *p;
* other one. To keep things simple, we first assume linear space,
* then we relocate it to the final handler layout as needed.
*/
-static u32 final_handler[64] __initdata;
+static u32 final_handler[64] __cpuinitdata;
/*
* Hazards
*
* As if we MIPS hackers wouldn't know how to nop pipelines happy ...
*/
-static void __init __maybe_unused build_tlb_probe_entry(u32 **p)
+static void __cpuinit __maybe_unused build_tlb_probe_entry(u32 **p)
{
switch (current_cpu_type()) {
/* Found by experiment: R4600 v2.0 needs this, too. */
*/
enum tlb_write_entry { tlb_random, tlb_indexed };
-static void __init build_tlb_write_entry(u32 **p, struct uasm_label **l,
+static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
struct uasm_reloc **r,
enum tlb_write_entry wmode)
{
* TMP and PTR are scratch.
* TMP will be clobbered, PTR will hold the pmd entry.
*/
-static void __init
+static void __cpuinit
build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
unsigned int tmp, unsigned int ptr)
{
* BVADDR is the faulting address, PTR is scratch.
* PTR will hold the pgd for vmalloc.
*/
-static void __init
+static void __cpuinit
build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
unsigned int bvaddr, unsigned int ptr)
{
* TMP and PTR are scratch.
* TMP will be clobbered, PTR will hold the pgd entry.
*/
-static void __init __maybe_unused
+static void __cpuinit __maybe_unused
build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
{
long pgdc = (long)pgd_current;
#endif /* !CONFIG_64BIT */
-static void __init build_adjust_context(u32 **p, unsigned int ctx)
+static void __cpuinit build_adjust_context(u32 **p, unsigned int ctx)
{
unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12;
unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1);
uasm_i_andi(p, ctx, ctx, mask);
}
-static void __init build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
+static void __cpuinit build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
{
/*
* Bug workaround for the Nevada. It seems as if under certain
UASM_i_ADDU(p, ptr, ptr, tmp); /* add in offset */
}
-static void __init build_update_entries(u32 **p, unsigned int tmp,
+static void __cpuinit build_update_entries(u32 **p, unsigned int tmp,
unsigned int ptep)
{
/*
#endif
}
-static void __init build_r4000_tlb_refill_handler(void)
+static void __cpuinit build_r4000_tlb_refill_handler(void)
{
u32 *p = tlb_handler;
struct uasm_label *l = labels;
u32 handle_tlbs[FASTPATH_SIZE] __cacheline_aligned;
u32 handle_tlbm[FASTPATH_SIZE] __cacheline_aligned;
-static void __init
+static void __cpuinit
iPTE_LW(u32 **p, struct uasm_label **l, unsigned int pte, unsigned int ptr)
{
#ifdef CONFIG_SMP
#endif
}
-static void __init
+static void __cpuinit
iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
unsigned int mode)
{
* the page table where this PTE is located, PTE will be re-loaded
* with its original value.
*/
-static void __init
+static void __cpuinit
build_pte_present(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
unsigned int pte, unsigned int ptr, enum label_id lid)
{
}
/* Make PTE valid, store result in PTR. */
-static void __init
+static void __cpuinit
build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte,
unsigned int ptr)
{
* Check if PTE can be written to, if not branch to LABEL. Regardless
* restore PTE with value from PTR when done.
*/
-static void __init
+static void __cpuinit
build_pte_writable(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
unsigned int pte, unsigned int ptr, enum label_id lid)
{
/* Make PTE writable, update software status bits as well, then store
* at PTR.
*/
-static void __init
+static void __cpuinit
build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte,
unsigned int ptr)
{
* Check if PTE can be modified, if not branch to LABEL. Regardless
* restore PTE with value from PTR when done.
*/
-static void __init
+static void __cpuinit
build_pte_modifiable(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
unsigned int pte, unsigned int ptr, enum label_id lid)
{
* This places the pte into ENTRYLO0 and writes it with tlbwi.
* Then it returns.
*/
-static void __init
+static void __cpuinit
build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp)
{
uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
* may have the probe fail bit set as a result of a trap on a
* kseg2 access, i.e. without refill. Then it returns.
*/
-static void __init
+static void __cpuinit
build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l,
struct uasm_reloc **r, unsigned int pte,
unsigned int tmp)
uasm_i_rfe(p); /* branch delay */
}
-static void __init
+static void __cpuinit
build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte,
unsigned int ptr)
{
uasm_i_tlbp(p); /* load delay */
}
-static void __init build_r3000_tlb_load_handler(void)
+static void __cpuinit build_r3000_tlb_load_handler(void)
{
u32 *p = handle_tlbl;
struct uasm_label *l = labels;
dump_handler(handle_tlbl, ARRAY_SIZE(handle_tlbl));
}
-static void __init build_r3000_tlb_store_handler(void)
+static void __cpuinit build_r3000_tlb_store_handler(void)
{
u32 *p = handle_tlbs;
struct uasm_label *l = labels;
dump_handler(handle_tlbs, ARRAY_SIZE(handle_tlbs));
}
-static void __init build_r3000_tlb_modify_handler(void)
+static void __cpuinit build_r3000_tlb_modify_handler(void)
{
u32 *p = handle_tlbm;
struct uasm_label *l = labels;
/*
* R4000 style TLB load/store/modify handlers.
*/
-static void __init
+static void __cpuinit
build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
struct uasm_reloc **r, unsigned int pte,
unsigned int ptr)
build_tlb_probe_entry(p);
}
-static void __init
+static void __cpuinit
build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
struct uasm_reloc **r, unsigned int tmp,
unsigned int ptr)
#endif
}
-static void __init build_r4000_tlb_load_handler(void)
+static void __cpuinit build_r4000_tlb_load_handler(void)
{
u32 *p = handle_tlbl;
struct uasm_label *l = labels;
dump_handler(handle_tlbl, ARRAY_SIZE(handle_tlbl));
}
-static void __init build_r4000_tlb_store_handler(void)
+static void __cpuinit build_r4000_tlb_store_handler(void)
{
u32 *p = handle_tlbs;
struct uasm_label *l = labels;
dump_handler(handle_tlbs, ARRAY_SIZE(handle_tlbs));
}
-static void __init build_r4000_tlb_modify_handler(void)
+static void __cpuinit build_r4000_tlb_modify_handler(void)
{
u32 *p = handle_tlbm;
struct uasm_label *l = labels;
dump_handler(handle_tlbm, ARRAY_SIZE(handle_tlbm));
}
-void __init build_tlb_refill_handler(void)
+void __cpuinit build_tlb_refill_handler(void)
{
/*
* The refill handler is generated per-CPU, multi-node systems
}
}
-void __init flush_tlb_handlers(void)
+void __cpuinit flush_tlb_handlers(void)
{
flush_icache_range((unsigned long)handle_tlbl,
(unsigned long)handle_tlbl + sizeof(handle_tlbl));
| (e) << RE_SH \
| (f) << FUNC_SH)
-static struct insn insn_table[] __initdata = {
+static struct insn insn_table[] __cpuinitdata = {
{ insn_addiu, M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_addu, M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD },
{ insn_and, M(spec_op, 0, 0, 0, 0, and_op), RS | RT | RD },
#undef M
-static inline __init u32 build_rs(u32 arg)
+static inline __cpuinit u32 build_rs(u32 arg)
{
if (arg & ~RS_MASK)
printk(KERN_WARNING "Micro-assembler field overflow\n");
return (arg & RS_MASK) << RS_SH;
}
-static inline __init u32 build_rt(u32 arg)
+static inline __cpuinit u32 build_rt(u32 arg)
{
if (arg & ~RT_MASK)
printk(KERN_WARNING "Micro-assembler field overflow\n");
return (arg & RT_MASK) << RT_SH;
}
-static inline __init u32 build_rd(u32 arg)
+static inline __cpuinit u32 build_rd(u32 arg)
{
if (arg & ~RD_MASK)
printk(KERN_WARNING "Micro-assembler field overflow\n");
return (arg & RD_MASK) << RD_SH;
}
-static inline __init u32 build_re(u32 arg)
+static inline __cpuinit u32 build_re(u32 arg)
{
if (arg & ~RE_MASK)
printk(KERN_WARNING "Micro-assembler field overflow\n");
return (arg & RE_MASK) << RE_SH;
}
-static inline __init u32 build_simm(s32 arg)
+static inline __cpuinit u32 build_simm(s32 arg)
{
if (arg > 0x7fff || arg < -0x8000)
printk(KERN_WARNING "Micro-assembler field overflow\n");
return arg & 0xffff;
}
-static inline __init u32 build_uimm(u32 arg)
+static inline __cpuinit u32 build_uimm(u32 arg)
{
if (arg & ~IMM_MASK)
printk(KERN_WARNING "Micro-assembler field overflow\n");
return arg & IMM_MASK;
}
-static inline __init u32 build_bimm(s32 arg)
+static inline __cpuinit u32 build_bimm(s32 arg)
{
if (arg > 0x1ffff || arg < -0x20000)
printk(KERN_WARNING "Micro-assembler field overflow\n");
return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 2) & 0x7fff);
}
-static inline __init u32 build_jimm(u32 arg)
+static inline __cpuinit u32 build_jimm(u32 arg)
{
if (arg & ~((JIMM_MASK) << 2))
printk(KERN_WARNING "Micro-assembler field overflow\n");
return (arg >> 2) & JIMM_MASK;
}
-static inline __init u32 build_func(u32 arg)
+static inline __cpuinit u32 build_func(u32 arg)
{
if (arg & ~FUNC_MASK)
printk(KERN_WARNING "Micro-assembler field overflow\n");
return arg & FUNC_MASK;
}
-static inline __init u32 build_set(u32 arg)
+static inline __cpuinit u32 build_set(u32 arg)
{
if (arg & ~SET_MASK)
printk(KERN_WARNING "Micro-assembler field overflow\n");
* The order of opcode arguments is implicitly left to right,
* starting with RS and ending with FUNC or IMM.
*/
-static void __init build_insn(u32 **buf, enum opcode opc, ...)
+static void __cpuinit build_insn(u32 **buf, enum opcode opc, ...)
{
struct insn *ip = NULL;
unsigned int i;
I_u2u1u3(_xori)
/* Handle labels. */
-void __init uasm_build_label(struct uasm_label **lab, u32 *addr, int lid)
+void __cpuinit uasm_build_label(struct uasm_label **lab, u32 *addr, int lid)
{
(*lab)->addr = addr;
(*lab)->lab = lid;
(*lab)++;
}
-int __init uasm_in_compat_space_p(long addr)
+int __cpuinit uasm_in_compat_space_p(long addr)
{
/* Is this address in 32bit compat space? */
#ifdef CONFIG_64BIT
#endif
}
-int __init uasm_rel_highest(long val)
+int __cpuinit uasm_rel_highest(long val)
{
#ifdef CONFIG_64BIT
return ((((val + 0x800080008000L) >> 48) & 0xffff) ^ 0x8000) - 0x8000;
#endif
}
-int __init uasm_rel_higher(long val)
+int __cpuinit uasm_rel_higher(long val)
{
#ifdef CONFIG_64BIT
return ((((val + 0x80008000L) >> 32) & 0xffff) ^ 0x8000) - 0x8000;
#endif
}
-int __init uasm_rel_hi(long val)
+int __cpuinit uasm_rel_hi(long val)
{
return ((((val + 0x8000L) >> 16) & 0xffff) ^ 0x8000) - 0x8000;
}
-int __init uasm_rel_lo(long val)
+int __cpuinit uasm_rel_lo(long val)
{
return ((val & 0xffff) ^ 0x8000) - 0x8000;
}
-void __init UASM_i_LA_mostly(u32 **buf, unsigned int rs, long addr)
+void __cpuinit UASM_i_LA_mostly(u32 **buf, unsigned int rs, long addr)
{
if (!uasm_in_compat_space_p(addr)) {
uasm_i_lui(buf, rs, uasm_rel_highest(addr));
uasm_i_lui(buf, rs, uasm_rel_hi(addr));
}
-void __init UASM_i_LA(u32 **buf, unsigned int rs, long addr)
+void __cpuinit UASM_i_LA(u32 **buf, unsigned int rs, long addr)
{
UASM_i_LA_mostly(buf, rs, addr);
if (uasm_rel_lo(addr)) {
}
/* Handle relocations. */
-void __init
+void __cpuinit
uasm_r_mips_pc16(struct uasm_reloc **rel, u32 *addr, int lid)
{
(*rel)->addr = addr;
(*rel)++;
}
-static inline void __init
+static inline void __cpuinit
__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
{
long laddr = (long)lab->addr;
}
}
-void __init
+void __cpuinit
uasm_resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
{
struct uasm_label *l;
__resolve_relocs(rel, l);
}
-void __init
+void __cpuinit
uasm_move_relocs(struct uasm_reloc *rel, u32 *first, u32 *end, long off)
{
for (; rel->lab != UASM_LABEL_INVALID; rel++)
rel->addr += off;
}
-void __init
+void __cpuinit
uasm_move_labels(struct uasm_label *lab, u32 *first, u32 *end, long off)
{
for (; lab->lab != UASM_LABEL_INVALID; lab++)
lab->addr += off;
}
-void __init
+void __cpuinit
uasm_copy_handler(struct uasm_reloc *rel, struct uasm_label *lab, u32 *first,
u32 *end, u32 *target)
{
uasm_move_labels(lab, first, end, off);
}
-int __init uasm_insn_has_bdelay(struct uasm_reloc *rel, u32 *addr)
+int __cpuinit uasm_insn_has_bdelay(struct uasm_reloc *rel, u32 *addr)
{
for (; rel->lab != UASM_LABEL_INVALID; rel++) {
if (rel->addr == addr
}
/* Convenience functions for labeled branches. */
-void __init
+void __cpuinit
uasm_il_bltz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
uasm_i_bltz(p, reg, 0);
}
-void __init
+void __cpuinit
uasm_il_b(u32 **p, struct uasm_reloc **r, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
uasm_i_b(p, 0);
}
-void __init
+void __cpuinit
uasm_il_beqz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
uasm_i_beqz(p, reg, 0);
}
-void __init
+void __cpuinit
uasm_il_beqzl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
uasm_i_beqzl(p, reg, 0);
}
-void __init
+void __cpuinit
uasm_il_bnez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
uasm_i_bnez(p, reg, 0);
}
-void __init
+void __cpuinit
uasm_il_bgezl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
uasm_i_bgezl(p, reg, 0);
}
-void __init
+void __cpuinit
uasm_il_bgez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
#include <linux/types.h>
#define Ip_u1u2u3(op) \
-void __init \
+void __cpuinit \
uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
#define Ip_u2u1u3(op) \
-void __init \
+void __cpuinit \
uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
#define Ip_u3u1u2(op) \
-void __init \
+void __cpuinit \
uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
#define Ip_u1u2s3(op) \
-void __init \
+void __cpuinit \
uasm_i##op(u32 **buf, unsigned int a, unsigned int b, signed int c)
#define Ip_u2s3u1(op) \
-void __init \
+void __cpuinit \
uasm_i##op(u32 **buf, unsigned int a, signed int b, unsigned int c)
#define Ip_u2u1s3(op) \
-void __init \
+void __cpuinit \
uasm_i##op(u32 **buf, unsigned int a, unsigned int b, signed int c)
#define Ip_u1u2(op) \
-void __init uasm_i##op(u32 **buf, unsigned int a, unsigned int b)
+void __cpuinit uasm_i##op(u32 **buf, unsigned int a, unsigned int b)
#define Ip_u1s2(op) \
-void __init uasm_i##op(u32 **buf, unsigned int a, signed int b)
+void __cpuinit uasm_i##op(u32 **buf, unsigned int a, signed int b)
-#define Ip_u1(op) void __init uasm_i##op(u32 **buf, unsigned int a)
+#define Ip_u1(op) void __cpuinit uasm_i##op(u32 **buf, unsigned int a)
-#define Ip_0(op) void __init uasm_i##op(u32 **buf)
+#define Ip_0(op) void __cpuinit uasm_i##op(u32 **buf)
Ip_u2u1s3(_addiu);
Ip_u3u1u2(_addu);
int lab;
};
-void __init uasm_build_label(struct uasm_label **lab, u32 *addr, int lid);
+void __cpuinit uasm_build_label(struct uasm_label **lab, u32 *addr, int lid);
#ifdef CONFIG_64BIT
-int __init uasm_in_compat_space_p(long addr);
-int __init uasm_rel_highest(long val);
-int __init uasm_rel_higher(long val);
+int uasm_in_compat_space_p(long addr);
+int uasm_rel_highest(long val);
+int uasm_rel_higher(long val);
#endif
-int __init uasm_rel_hi(long val);
-int __init uasm_rel_lo(long val);
-void __init UASM_i_LA_mostly(u32 **buf, unsigned int rs, long addr);
-void __init UASM_i_LA(u32 **buf, unsigned int rs, long addr);
+int uasm_rel_hi(long val);
+int uasm_rel_lo(long val);
+void UASM_i_LA_mostly(u32 **buf, unsigned int rs, long addr);
+void UASM_i_LA(u32 **buf, unsigned int rs, long addr);
#define UASM_L_LA(lb) \
-static inline void __init uasm_l##lb(struct uasm_label **lab, u32 *addr) \
+static inline void __cpuinit uasm_l##lb(struct uasm_label **lab, u32 *addr) \
{ \
uasm_build_label(lab, addr, label##lb); \
}
/* This is zero so we can use zeroed label arrays. */
#define UASM_LABEL_INVALID 0
-void __init uasm_r_mips_pc16(struct uasm_reloc **rel, u32 *addr, int lid);
-void __init
-uasm_resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab);
-void __init
-uasm_move_relocs(struct uasm_reloc *rel, u32 *first, u32 *end, long off);
-void __init
-uasm_move_labels(struct uasm_label *lab, u32 *first, u32 *end, long off);
-void __init
-uasm_copy_handler(struct uasm_reloc *rel, struct uasm_label *lab, u32 *first,
- u32 *end, u32 *target);
-int __init uasm_insn_has_bdelay(struct uasm_reloc *rel, u32 *addr);
+void uasm_r_mips_pc16(struct uasm_reloc **rel, u32 *addr, int lid);
+void uasm_resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab);
+void uasm_move_relocs(struct uasm_reloc *rel, u32 *first, u32 *end, long off);
+void uasm_move_labels(struct uasm_label *lab, u32 *first, u32 *end, long off);
+void uasm_copy_handler(struct uasm_reloc *rel, struct uasm_label *lab,
+ u32 *first, u32 *end, u32 *target);
+int uasm_insn_has_bdelay(struct uasm_reloc *rel, u32 *addr);
/* Convenience functions for labeled branches. */
-void __init
-uasm_il_bltz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
-void __init uasm_il_b(u32 **p, struct uasm_reloc **r, int lid);
-void __init
-uasm_il_beqz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
-void __init
-uasm_il_beqzl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
-void __init
-uasm_il_bnez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
-void __init
-uasm_il_bgezl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
-void __init
-uasm_il_bgez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
+void uasm_il_bltz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
+void uasm_il_b(u32 **p, struct uasm_reloc **r, int lid);
+void uasm_il_beqz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
+void uasm_il_beqzl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
+void uasm_il_bnez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
+void uasm_il_bgezl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
+void uasm_il_bgez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
extern struct pci_ops bridge_pci_ops;
-int __init bridge_probe(nasid_t nasid, int widget_id, int masterwid)
+int __cpuinit bridge_probe(nasid_t nasid, int widget_id, int masterwid)
{
unsigned long offset = NODE_OFFSET(nasid);
struct bridge_controller *bc;
}
}
-void pcibios_fixup_bus(struct pci_bus *bus)
+void __devinit pcibios_fixup_bus(struct pci_bus *bus)
{
/* Propagate hose info into the subordinate devices. */
extern void xtalk_probe_node(cnodeid_t nid);
-static void __init per_hub_init(cnodeid_t cnode)
+static void __cpuinit per_hub_init(cnodeid_t cnode)
{
struct hub_data *hub = hub_data(cnode);
nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode);
set_c0_status(SRB_TIMOCLK);
}
-void __init hub_rtc_init(cnodeid_t cnode)
+void __cpuinit hub_rtc_init(cnodeid_t cnode)
{
/*
* We only need to initialize the current node.
extern int bridge_probe(nasid_t nasid, int widget, int masterwid);
-static int __init probe_one_port(nasid_t nasid, int widget, int masterwid)
+static int __cpuinit probe_one_port(nasid_t nasid, int widget, int masterwid)
{
widgetreg_t widget_id;
xwidget_part_num_t partnum;
return 0;
}
-static int __init xbow_probe(nasid_t nasid)
+static int __cpuinit xbow_probe(nasid_t nasid)
{
lboard_t *brd;
klxbow_t *xbow_p;
return 0;
}
-void __init xtalk_probe_node(cnodeid_t nid)
+void __cpuinit xtalk_probe_node(cnodeid_t nid)
{
volatile u64 hubreg;
nasid_t nasid;
clear_bit(PG_dcache_dirty, &(page)->flags)
/* Run kernel code uncached, useful for cache probing functions. */
-unsigned long __init run_uncached(void *func);
+unsigned long run_uncached(void *func);
extern void *kmap_coherent(struct page *page, unsigned long addr);
extern void kunmap_coherent(void);