KBUILD_CFLAGS += $(call cc-option,-fno-delete-null-pointer-checks,)
KBUILD_CFLAGS += $(call cc-disable-warning,maybe-uninitialized,)
+KBUILD_CFLAGS += $(call cc-disable-warning,frame-address,)
+ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
+KBUILD_CFLAGS += $(call cc-option,-ffunction-sections,)
+KBUILD_CFLAGS += $(call cc-option,-fdata-sections,)
+endif
+
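For intuition (not part of the patch): with -ffunction-sections and -fdata-sections every definition gets its own ELF section, which is what later lets --gc-sections discard the unreferenced ones. A minimal sketch, assuming a plain GCC invocation outside the kernel build:

    /* demo.c, compiled with:
     *   gcc -c -ffunction-sections -fdata-sections demo.c
     * Each definition lands in its own section, roughly:
     *   used_fn   -> .text.used_fn
     *   unused_fn -> .text.unused_fn
     *   table     -> .data.table
     * Linking with --gc-sections can then drop .text.unused_fn on its
     * own, something a monolithic .text section would not allow.
     */
    int table[4] = { 1, 2, 3, 4 };

    int used_fn(void)   { return table[0]; }
    int unused_fn(void) { return table[1]; }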
ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
KBUILD_CFLAGS += -Os
else
endchoice
+config THIN_ARCHIVES
+	bool
+	help
+	  Select this if the architecture wants to use thin archives
+	  instead of ld -r to create the built-in.o files.
+
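For scale: ld -r copies every member object into a single relocatable built-in.o, whereas a thin archive (GNU ar with the 'T' modifier) records only references to its member .o files, so intermediate objects are not duplicated on disk and the final link reads the originals directly.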
+config LD_DEAD_CODE_DATA_ELIMINATION
+	bool
+	help
+	  Select this if the architecture wants to do dead code and
+	  data elimination with the linker by compiling with
+	  -ffunction-sections -fdata-sections and linking with
+	  --gc-sections.
+
+	  This requires that the arch annotates or otherwise protects
+	  its external entry points from being discarded. Linker scripts
+	  must also merge .text.*, .data.*, and .bss.* correctly into
+	  output sections. Care must be taken not to pull in unrelated
+	  sections (e.g., '.text.init'). Typically '.' in section names
+	  is used to distinguish them from label names / C identifiers.
+
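To picture the "annotates or otherwise protects its external entry points" requirement, here is a hedged, non-kernel sketch; the attribute keeps the compiler from dropping the symbol, and the dedicated section name (invented for illustration, not from this patch) gives a linker script something to KEEP():

    /* Hedged sketch: pin an entry point against -ffunction-sections +
     * --gc-sections. __attribute__((used)) stops the compiler from
     * eliminating the apparently unreferenced function; a linker script
     * would then pin the section with something like
     *   KEEP(*(.text.entry))
     * (.text.entry is an illustrative name).
     */
    __attribute__((used, section(".text.entry")))
    void arch_entry_stub(void)
    {
    }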
+config HAVE_ARCH_WITHIN_STACK_FRAMES
+	bool
+	help
+	  An architecture should select this if it can walk the kernel stack
+	  frames to determine if an object is part of either the arguments
+	  or local variables (i.e. that it excludes saved return addresses,
+	  and similar) by implementing an inline arch_within_stack_frames(),
+	  which is used by CONFIG_HARDENED_USERCOPY.
+
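A hedged, userspace-flavoured sketch of what such a walker does, under an assumed frame-record layout (struct frame, within_stack_frames(), and the layout are illustrative, not any architecture's real arch_within_stack_frames()):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Hypothetical frame record: saved frame pointer, then return
     * address, as on many frame-pointer ABIs. Real layouts vary. */
    struct frame {
            struct frame *next;   /* caller's frame record */
            void *ret_addr;       /* saved return address */
    };

    /* Report true only if [obj, obj + len) lies wholly inside the
     * locals/arguments area of one frame, i.e. strictly between two
     * frame records, so saved return addresses are never included. */
    static bool within_stack_frames(const struct frame *fp,
                                    const void *obj, size_t len)
    {
            const uint8_t *start = obj;
            const uint8_t *end = start + len;

            while (fp && fp->next) {
                    const uint8_t *lo = (const uint8_t *)(fp + 1);
                    const uint8_t *hi = (const uint8_t *)fp->next;

                    if (start >= lo && end <= hi)
                            return true;
                    fp = fp->next;
            }
            return false;
    }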
config HAVE_CONTEXT_TRACKING
bool
help
ifdef CONFIG_SND_IMX_SOC
obj-y += ssi-fiq.o
-obj-y += ssi-fiq-ksym.o
endif
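The dropped ssi-fiq-ksym.o existed only to carry EXPORT_SYMBOL() for symbols defined in assembly; once EXPORT_SYMBOL can live in the .S file itself (via asm/export.h), the C shim is dead weight. A hedged sketch of the pattern being removed (treat the symbol name as illustrative):

    /* Hedged sketch of a *-ksym.c shim: a C file whose only job is to
     * export a symbol actually defined in assembly. */
    #include <linux/module.h>

    extern unsigned char imx_ssi_fiq_start[];   /* defined in ssi-fiq.S */
    EXPORT_SYMBOL(imx_ssi_fiq_start);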
-# i.MX1 based machines
-obj-$(CONFIG_MACH_SCB9328) += mach-scb9328.o
-obj-$(CONFIG_MACH_APF9328) += mach-apf9328.o
-obj-$(CONFIG_MACH_IMX1_DT) += imx1-dt.o
-
# i.MX21 based machines
obj-$(CONFIG_MACH_MX21ADS) += mach-mx21ads.o
sync /* additional sync needed on g4 */
isync
blr
+_ASM_NOKPROBE_SYMBOL(flush_icache_range)
+EXPORT_SYMBOL(flush_icache_range)
+
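With the export in place, modular code can call the routine directly. A hedged usage sketch (publish_code() is invented for illustration):

    #include <asm/cacheflush.h>

    /* After writing instructions into [start, start + len), make the
     * I-cache coherent with the new contents before executing them. */
    static void publish_code(unsigned long start, unsigned long len)
    {
            flush_icache_range(start, start + len);
    }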
/*
* Flush a particular page from the data cache to RAM.
* Note: this is necessary because the instruction cache does *not*
bdnz 2b
isync
blr
- .previous .text
+_ASM_NOKPROBE_SYMBOL(flush_icache_range)
+EXPORT_SYMBOL(flush_icache_range)
+
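_ASM_NOKPROBE_SYMBOL is the assembly counterpart of the C-side kprobe blacklist: cache-flush routines run while probes are being armed, so they must not themselves be probed. A hedged sketch of the same idea in C (the helper is invented):

    #include <linux/kprobes.h>

    /* Blacklisted so kprobes can never land on it, the C equivalent of
     * the _ASM_NOKPROBE_SYMBOL() annotations added above. */
    static int patching_helper(void)
    {
            return 0;
    }
    NOKPROBE_SYMBOL(patching_helper);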
/*
* Like above, but only do the D-cache.
*
#include <asm/thread_info.h>
#include <asm/page.h>
#include <asm/ptrace.h>
+#include <asm/export.h>
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-/* void do_load_up_transact_altivec(struct thread_struct *thread)
- *
- * This is similar to load_up_altivec but for the transactional version of the
- * vector regs. It doesn't mess with the task MSR or valid flags.
- * Furthermore, VEC laziness is not supported with TM currently.
- */
-_GLOBAL(do_load_up_transact_altivec)
- mfmsr r6
- oris r5,r6,MSR_VEC@h
- MTMSRD(r5)
- isync
-
- li r4,1
- stw r4,THREAD_USED_VR(r3)
-
- li r10,THREAD_TRANSACT_VRSTATE+VRSTATE_VSCR
- lvx v0,r10,r3
- mtvscr v0
- addi r10,r3,THREAD_TRANSACT_VRSTATE
- REST_32VRS(0,r4,r10)
-
- blr
-#endif
-
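The deleted loader is not replaced one-for-one: after the transactional-memory rework the live vector registers are always tracked in vr_state, so callers can use the ordinary loader. A hedged sketch of the resulting call shape (the wrapper is invented; load_vr_state() and the field names follow the surrounding powerpc code):

    #include <asm/processor.h>
    #include <asm/switch_to.h>

    /* Invented wrapper: restore VMX state via the generic loader rather
     * than a TM-specific routine. */
    static void restore_vec_sketch(struct thread_struct *t)
    {
            load_vr_state(&t->vr_state);
    }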
/*
* Load state from memory into VMX registers including VSCR.
* Assumes the caller has enabled VMX in the MSR.
.L_done_memcpy_trap:
xorq %rax, %rax
ret
-ENDPROC(memcpy_mcsafe)
-EXPORT_SYMBOL_GPL(memcpy_mcsafe)
+ENDPROC(memcpy_mcsafe_unrolled)
+EXPORT_SYMBOL_GPL(memcpy_mcsafe_unrolled)
.section .fixup, "ax"
/* Return -EFAULT for any failure */
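Call sites keep using the memcpy_mcsafe() name; after this rename it becomes a wrapper that can dispatch to the unrolled variant when machine-check recovery is available. A hedged usage sketch (copy_from_pmem() is invented):

    #include <linux/errno.h>
    #include <linux/string.h>
    #include <linux/types.h>

    /* memcpy_mcsafe() returns 0 on success and -EFAULT if a machine
     * check fired while reading the source, so callers can recover from
     * poisoned memory instead of crashing. */
    static int copy_from_pmem(void *dst, const void *src, size_t n)
    {
            if (memcpy_mcsafe(dst, src, n))
                    return -EFAULT;
            return 0;
    }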