/*
* Mark instructions that need a load of a virtual address patched to be
* a load of a physical address.  We use this either in a critical performance
* path (ivt.S - TLB miss processing) or in places where it might not be
* safe to use a "tpa" instruction (mca_asm.S - error recovery).
*/
- .section ".data.patch.vtop", "a" // declare section & section attributes
+ .section ".data..patch.vtop", "a" // declare section & section attributes
.previous
#define LOAD_PHYSICAL(pr, reg, obj) \
[1:](pr)movl reg = obj; \
- .xdata4 ".data.patch.vtop", 1b-.
+ .xdata4 ".data..patch.vtop", 1b-.
/*
* For now, we always put in the McKinley E9 workaround. On CPUs that don't need it,
* we'll patch out the workaround bundles with NOPs at boot, so their impact is minimal.
*/
#define DO_MCKINLEY_E9_WORKAROUND
#ifdef DO_MCKINLEY_E9_WORKAROUND
- .section ".data.patch.mckinley_e9", "a"
+ .section ".data..patch.mckinley_e9", "a"
.previous
/* workaround for Itanium 2 Errata 9: */
# define FSYS_RETURN \
- .xdata4 ".data.patch.mckinley_e9", 1f-.; \
+ .xdata4 ".data..patch.mckinley_e9", 1f-.; \
1:{ .mib; \
nop.m 0; \
mov r16=ar.pfs; \
* If physical stack register size is different from DEF_NUM_STACK_REG,
* dynamically patch the kernel for correct size.
*/
- .section ".data.patch.phys_stack_reg", "a"
+ .section ".data..patch.phys_stack_reg", "a"
.previous
#define LOAD_PHYS_STACK_REG_SIZE(reg) \
[1:] adds reg=IA64_NUM_PHYS_STACK_REG*8+8,r0; \
- .xdata4 ".data.patch.phys_stack_reg", 1b-.
+ .xdata4 ".data..patch.phys_stack_reg", 1b-.
/*
* Up until early 2004, use of .align within a function caused bad unwind info.
*
* We can't easily refer to kernel symbols from the gate page's code.  To avoid full runtime
* relocation (i.e., fixing up address loads and branches that refer
* to targets outside the shared object) and to avoid multi-phase kernel builds, we
* simply create minimalistic "patch lists" in special ELF sections.
*/
- .section ".data.patch.fsyscall_table", "a"
+ .section ".data..patch.fsyscall_table", "a"
.previous
#define LOAD_FSYSCALL_TABLE(reg) \
[1:] movl reg=0; \
- .xdata4 ".data.patch.fsyscall_table", 1b-.
+ .xdata4 ".data..patch.fsyscall_table", 1b-.
- .section ".data.patch.brl_fsys_bubble_down", "a"
+ .section ".data..patch.brl_fsys_bubble_down", "a"
.previous
#define BRL_COND_FSYS_BUBBLE_DOWN(pr) \
[1:](pr)brl.cond.sptk 0; \
;; \
- .xdata4 ".data.patch.brl_fsys_bubble_down", 1b-.
+ .xdata4 ".data..patch.brl_fsys_bubble_down", 1b-.
GLOBAL_ENTRY(__kernel_syscall_via_break)
.prologue
*/
. = GATE_ADDR + 0x600;
- .data.patch : {
+ .data..patch : {
__paravirt_start_gate_mckinley_e9_patchlist = .;
- *(.data.patch.mckinley_e9)
+ *(.data..patch.mckinley_e9)
__paravirt_end_gate_mckinley_e9_patchlist = .;
__paravirt_start_gate_vtop_patchlist = .;
- *(.data.patch.vtop)
+ *(.data..patch.vtop)
__paravirt_end_gate_vtop_patchlist = .;
__paravirt_start_gate_fsyscall_patchlist = .;
- *(.data.patch.fsyscall_table)
+ *(.data..patch.fsyscall_table)
__paravirt_end_gate_fsyscall_patchlist = .;
__paravirt_start_gate_brl_fsys_bubble_down_patchlist = .;
- *(.data.patch.brl_fsys_bubble_down)
+ *(.data..patch.brl_fsys_bubble_down)
__paravirt_end_gate_brl_fsys_bubble_down_patchlist = .;
} :readable
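
On the C side, the __paravirt_*_patchlist symbols defined above are the kernel's only handle
on these lists; they are declared as extern arrays and handed, pairwise, to the per-list
patch routines during early boot.  A sketch of that dispatch, assuming walker routines shaped
like the ones sketched earlier (the ia64_patch_gate() name matches my reading of patch.c,
so verify against the tree):

/* Sketch only; the symbol names are exactly those defined by the gate linker script above. */
extern char __paravirt_start_gate_fsyscall_patchlist[], __paravirt_end_gate_fsyscall_patchlist[];
extern char __paravirt_start_gate_brl_fsys_bubble_down_patchlist[],
	    __paravirt_end_gate_brl_fsys_bubble_down_patchlist[];
extern char __paravirt_start_gate_vtop_patchlist[], __paravirt_end_gate_vtop_patchlist[];
extern char __paravirt_start_gate_mckinley_e9_patchlist[], __paravirt_end_gate_mckinley_e9_patchlist[];

#define GATE_START(name)	((unsigned long)__paravirt_start_gate_##name##_patchlist)
#define GATE_END(name)		((unsigned long)__paravirt_end_gate_##name##_patchlist)

void ia64_patch_gate(void)
{
	patch_fsyscall_table(GATE_START(fsyscall), GATE_END(fsyscall));
	patch_brl_fsys_bubble_down(GATE_START(brl_fsys_bubble_down),
				   GATE_END(brl_fsys_bubble_down));
	ia64_patch_vtop(GATE_START(vtop), GATE_END(vtop));
	ia64_patch_mckinley_e9(GATE_START(mckinley_e9), GATE_END(mckinley_e9));
}
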
#define ACCOUNT_SYS_ENTER
#endif
- .section ".data.patch.rse", "a"
+ .section ".data..patch.rse", "a"
.previous
/*
(pUStk) extr.u r17=r18,3,6; \
(pUStk) sub r16=r18,r22; \
[1:](pKStk) br.cond.sptk.many 1f; \
- .xdata4 ".data.patch.rse",1b-. \
+ .xdata4 ".data..patch.rse",1b-. \
;; \
cmp.ge p6,p7 = 33,r17; \
;; \
__stop___mca_table = .;
}
- .data.patch.phys_stack_reg : AT(ADDR(.data.patch.phys_stack_reg) - LOAD_OFFSET)
+ .data..patch.phys_stack_reg : AT(ADDR(.data..patch.phys_stack_reg) - LOAD_OFFSET)
{
__start___phys_stack_reg_patchlist = .;
- *(.data.patch.phys_stack_reg)
+ *(.data..patch.phys_stack_reg)
__end___phys_stack_reg_patchlist = .;
}
INIT_TEXT_SECTION(PAGE_SIZE)
INIT_DATA_SECTION(16)
- .data.patch.vtop : AT(ADDR(.data.patch.vtop) - LOAD_OFFSET)
+ .data..patch.vtop : AT(ADDR(.data..patch.vtop) - LOAD_OFFSET)
{
__start___vtop_patchlist = .;
- *(.data.patch.vtop)
+ *(.data..patch.vtop)
__end___vtop_patchlist = .;
}
- .data.patch.rse : AT(ADDR(.data.patch.rse) - LOAD_OFFSET)
+ .data..patch.rse : AT(ADDR(.data..patch.rse) - LOAD_OFFSET)
{
__start___rse_patchlist = .;
- *(.data.patch.rse)
+ *(.data..patch.rse)
__end___rse_patchlist = .;
}
- .data.patch.mckinley_e9 : AT(ADDR(.data.patch.mckinley_e9) - LOAD_OFFSET)
+ .data..patch.mckinley_e9 : AT(ADDR(.data..patch.mckinley_e9) - LOAD_OFFSET)
{
__start___mckinley_e9_bundles = .;
- *(.data.patch.mckinley_e9)
+ *(.data..patch.mckinley_e9)
__end___mckinley_e9_bundles = .;
}
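
The kernel-image lists follow the same pattern: the __start/__end pairs defined above are
declared extern in C (asm/sections.h, in my reading) and consumed by the corresponding
routines in patch.c during early setup.  Note that the rename only touches the section-name
strings passed to .xdata4 and the input-section globs here; the C-visible symbols are
unchanged.  A sketch of those declarations, with the assumed consumer noted per list:

/* Sketch only: the C-side view of the lists collected above. */
extern char __start___vtop_patchlist[], __end___vtop_patchlist[];		/* ia64_patch_vtop() */
extern char __start___rse_patchlist[], __end___rse_patchlist[];		/* ia64_patch_rse() */
extern char __start___mckinley_e9_bundles[], __end___mckinley_e9_bundles[];	/* ia64_patch_mckinley_e9() */
extern char __start___phys_stack_reg_patchlist[],
	    __end___phys_stack_reg_patchlist[];				/* ia64_patch_phys_stack_reg() */
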