select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
select ARCH_HAVE_CUSTOM_GPIO_H
select ARCH_WANT_IPC_PARSE_VERSION
+	select BUILDTIME_EXTABLE_SORT if MMU
select CPU_PM if (SUSPEND || CPU_IDLE)
-	select DCACHE_WORD_ACCESS if (CPU_V6 || CPU_V6K || CPU_V7) && !CPU_BIG_ENDIAN
+	select DCACHE_WORD_ACCESS if (CPU_V6 || CPU_V6K || CPU_V7) && !CPU_BIG_ENDIAN && MMU
select GENERIC_ATOMIC64 if (CPU_V6 || !CPU_32v6K || !AEABI)
select GENERIC_CLOCKEVENTS_BROADCAST if SMP
select GENERIC_IRQ_PROBE
select HAVE_AOUT
select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
select HAVE_ARCH_KGDB
+	select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_TRACEHOOK
select HAVE_BPF_JIT
select HAVE_C_RECORDMCOUNT
default "4"
config HOTPLUG_CPU
-	bool "Support for hot-pluggable CPUs (EXPERIMENTAL)"
-	depends on SMP && HOTPLUG && EXPERIMENTAL
+	bool "Support for hot-pluggable CPUs"
+	depends on SMP && HOTPLUG
help
Say Y here to experiment with turning CPUs off and on. CPUs
can be controlled through /sys/devices/system/cpu.
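For reference, CPUs enabled by this option are toggled through the sysfs
"online" attribute mentioned in the help text. A minimal user-space sketch
(hypothetical: assumes CPU1 exists and hotplug is enabled) might look like:

#include <stdio.h>

/* Take CPU1 offline by writing '0' to its sysfs online node. */
int main(void)
{
	FILE *f = fopen("/sys/devices/system/cpu/cpu1/online", "w");

	if (!f)
		return 1;	/* no such CPU, or hotplug disabled */
	fputc('0', f);		/* write '1' instead to bring it back online */
	return fclose(f) ? 1 : 0;
}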
default 100
config THUMB2_KERNEL
-	bool "Compile the kernel in Thumb-2 mode (EXPERIMENTAL)"
-	depends on CPU_V7 && !CPU_V6 && !CPU_V6K && EXPERIMENTAL
+	bool "Compile the kernel in Thumb-2 mode"
+	depends on CPU_V7 && !CPU_V6 && !CPU_V6K
select AEABI
select ARM_ASM_UNIFIED
select ARM_UNWIND
config XEN
bool "Xen guest support on ARM (EXPERIMENTAL)"
depends on EXPERIMENTAL && ARM && OF
+	depends on CPU_V7 && !CPU_V6
help
Say Y if you want to run Linux in a Virtual Machine on Xen on ARM.
OBJCOPYFLAGS :=-O binary -R .comment -S
GZFLAGS :=-9
#KBUILD_CFLAGS +=-pipe
-# Explicitly specifiy 32-bit ARM ISA since toolchain default can be -mthumb:
-KBUILD_CFLAGS	+=$(call cc-option,-marm,)
# Never generate .eh_frame
KBUILD_CFLAGS += $(call cc-option,-fno-dwarf2-cfi-asm)
# defines filename extension depending memory management type.
ifeq ($(CONFIG_MMU),)
MMUEXT := -nommu
+KBUILD_CFLAGS	+= $(call cc-option,-mno-unaligned-access)
endif
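The flag matters because recent GCC releases default to -munaligned-access
on ARMv6+ and may emit single LDR/STR instructions for packed fields, which
a !MMU system without alignment fixups cannot recover from. A rough
illustration (hypothetical struct, not from the kernel):

/* With unaligned access allowed, GCC may load 'v' with one LDR even
 * though it sits at offset 1; with -mno-unaligned-access it falls back
 * to byte loads instead. */
struct __attribute__((packed)) wire_hdr {
	char tag;
	int v;	/* unaligned 32-bit field */
};

int read_v(const struct wire_hdr *h)
{
	return h->v;
}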
ifeq ($(CONFIG_FRAME_POINTER),y)
ifeq ($(CONFIG_THUMB2_KERNEL),y)
AFLAGS_AUTOIT :=$(call as-option,-Wa$(comma)-mimplicit-it=always,-Wa$(comma)-mauto-it)
AFLAGS_NOWARN :=$(call as-option,-Wa$(comma)-mno-warn-deprecated,-Wa$(comma)-W)
-CFLAGS_THUMB2	:=-mthumb $(AFLAGS_AUTOIT) $(AFLAGS_NOWARN)
-AFLAGS_THUMB2	:=$(CFLAGS_THUMB2) -Wa$(comma)-mthumb
+CFLAGS_ISA	:=-mthumb $(AFLAGS_AUTOIT) $(AFLAGS_NOWARN)
+AFLAGS_ISA	:=$(CFLAGS_ISA) -Wa$(comma)-mthumb
# Work around buggy relocation from gas if requested:
ifeq ($(CONFIG_THUMB2_AVOID_R_ARM_THM_JUMP11),y)
CFLAGS_MODULE +=-fno-optimize-sibling-calls
endif
+else
+CFLAGS_ISA	:=$(call cc-option,-marm,)
+AFLAGS_ISA	:=$(CFLAGS_ISA)
endif
# Need -Uarm for gcc < 3.x
-KBUILD_CFLAGS	+=$(CFLAGS_ABI) $(CFLAGS_THUMB2) $(arch-y) $(tune-y) $(call cc-option,-mshort-load-bytes,$(call cc-option,-malignment-traps,)) -msoft-float -Uarm
-KBUILD_AFLAGS	+=$(CFLAGS_ABI) $(AFLAGS_THUMB2) $(arch-y) $(tune-y) -include asm/unified.h -msoft-float
+KBUILD_CFLAGS	+=$(CFLAGS_ABI) $(CFLAGS_ISA) $(arch-y) $(tune-y) $(call cc-option,-mshort-load-bytes,$(call cc-option,-malignment-traps,)) -msoft-float -Uarm
+KBUILD_AFLAGS	+=$(CFLAGS_ABI) $(AFLAGS_ISA) $(arch-y) $(tune-y) -include asm/unified.h -msoft-float
CHECKFLAGS += -D__arm__
bl schedule_tail
cmp r5, #0
movne r0, r4
-	movne	lr, pc
+	adrne	lr, BSYM(1f)
movne pc, r5
-	get_thread_info tsk
+1:	get_thread_info tsk
b ret_slow_syscall
ENDPROC(ret_from_fork)
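The lr fix-up above matters for Thumb-2 kernels: "mov lr, pc" copies the PC
with bit 0 clear, so the callee's return would drop the CPU into ARM state.
BSYM() builds a symbol address with the correct state bit; approximately,
as defined in asm/unified.h:

#ifdef CONFIG_THUMB2_KERNEL
#define BSYM(sym)	sym + 1	/* set bit 0: return stays in Thumb state */
#else
#define BSYM(sym)	sym	/* ARM state: plain address */
#endif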
ldr r10, [tsk, #TI_FLAGS] @ check for syscall tracing
stmdb sp!, {r4, r5} @ push fifth and sixth args
-#ifdef CONFIG_SECCOMP
-	tst	r10, #_TIF_SECCOMP
-	beq	1f
-	mov	r0, scno
-	bl	__secure_computing
-	add	r0, sp, #S_R0 + S_OFF		@ pointer to regs
-	ldmia	r0, {r0 - r3}			@ have to reload r0 - r3
-1:
-#endif
-
tst r10, #_TIF_SYSCALL_WORK @ are we tracing syscalls?
bne __sys_trace
ldmccia r1, {r0 - r6} @ have to reload r0 - r6
stmccia sp, {r4, r5} @ and update the stack args
ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine
-	b	2b
+	cmp	scno, #-1			@ skip the syscall?
+	bne	2b
+	add	sp, sp, #S_OFF			@ restore stack
+	b	ret_slow_syscall
__sys_trace_return:
str r0, [sp, #S_R0 + S_OFF]! @ save returned r0
-	mov	r1, scno
mov r0, sp
bl syscall_trace_exit
b ret_slow_syscall
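With the seccomp check moved out of assembly and into the C tracehook path,
the new scno == -1 comparison lets a seccomp filter (or tracer) skip a
syscall entirely. From user space, filters are installed with prctl(); a
minimal allow-everything sketch (a real filter would inspect the
seccomp_data and return SECCOMP_RET_KILL or SECCOMP_RET_ERRNO for unwanted
syscalls):

#include <stdio.h>
#include <linux/filter.h>
#include <linux/seccomp.h>
#include <sys/prctl.h>

int main(void)
{
	struct sock_filter insns[] = {
		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = sizeof(insns) / sizeof(insns[0]),
		.filter = insns,
	};

	/* Required so an unprivileged process may install a filter. */
	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
		return 1;
	if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog))
		return 1;
	puts("filter installed");
	return 0;
}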
asmlinkage void __cpuinit secondary_start_kernel(void)
{
struct mm_struct *mm = &init_mm;
-	unsigned int cpu = smp_processor_id();
+	unsigned int cpu;
+
+	/*
+	 * The identity mapping is uncached (strongly ordered), so
+	 * switch away from it before attempting any exclusive accesses.
+	 */
+	cpu_switch_mm(mm->pgd, mm);
+	enter_lazy_tlb(mm, current);
+	local_flush_tlb_all();
/*
* All kernel threads share the same mm context; grab a
* reference and switch to it.
*/
+	cpu = smp_processor_id();
atomic_inc(&mm->mm_count);
current->active_mm = mm;
cpumask_set_cpu(cpu, mm_cpumask(mm));
-	cpu_switch_mm(mm->pgd, mm);
-	enter_lazy_tlb(mm, current);
-	local_flush_tlb_all();
printk("CPU%u: Booted secondary processor\n", cpu);
smp_cross_call(mask, IPI_CALL_FUNC);
}
+void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
+{
+	smp_cross_call(mask, IPI_WAKEUP);
+}
+
void arch_send_call_function_single_ipi(int cpu)
{
smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
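The new arch_send_wakeup_ipi_mask() above gives platform code a way to kick
a core out of WFI. A hypothetical caller (assuming the declaration lives
alongside the other IPI hooks in asm/smp.h; the CPU choice is illustrative
only):

#include <linux/cpumask.h>
#include <asm/smp.h>

/* Hypothetical helper: wake one CPU parked in WFI. */
static void wake_one_cpu(int cpu)
{
	arch_send_wakeup_ipi_mask(cpumask_of(cpu));
}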
for (i = 0; i < NR_IPI; i++) {
seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);
-		for_each_present_cpu(cpu)
+		for_each_online_cpu(cpu)
seq_printf(p, "%10u ",
__get_irq_stat(cpu, ipi_irqs[i]));
static struct clk *twd_clk;
static unsigned long twd_timer_rate;
+static bool common_setup_called;
+static DEFINE_PER_CPU(bool, percpu_setup_called);
static struct clock_event_device __percpu **twd_evt;
static int twd_ppi;
switch (mode) {
case CLOCK_EVT_MODE_PERIODIC:
-		/* timer load already set up */
ctrl = TWD_TIMER_CONTROL_ENABLE | TWD_TIMER_CONTROL_IT_ENABLE
| TWD_TIMER_CONTROL_PERIODIC;
-		__raw_writel(twd_timer_rate / HZ, twd_base + TWD_TIMER_LOAD);
+		__raw_writel(DIV_ROUND_CLOSEST(twd_timer_rate, HZ),
+			twd_base + TWD_TIMER_LOAD);
break;
case CLOCK_EVT_MODE_ONESHOT:
/* period set, and timer enabled in 'next_event' hook */
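The DIV_ROUND_CLOSEST() change halves the worst-case load-value error:
plain division truncates, so a rate of 999950 Hz at HZ=100 would program
9999 instead of the nearer 10000. For unsigned operands the kernel macro
boils down to:

/* Round to the nearest quotient rather than truncating toward zero. */
#define DIV_ROUND_CLOSEST(x, divisor)	(((x) + (divisor) / 2) / (divisor))

/* 999950 / 100                    == 9999  (truncated, ~0.5 tick slow) */
/* DIV_ROUND_CLOSEST(999950, 100)  == 10000 (nearest)                   */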
return clk;
}
-	err = clk_prepare(clk);
+	err = clk_prepare_enable(clk);
	if (err) {
-		pr_err("smp_twd: clock failed to prepare: %d\n", err);
-		clk_put(clk);
-		return ERR_PTR(err);
-	}
-
-	err = clk_enable(clk);
-	if (err) {
-		pr_err("smp_twd: clock failed to enable: %d\n", err);
-		clk_unprepare(clk);
+		pr_err("smp_twd: clock failed to prepare+enable: %d\n", err);
clk_put(clk);
return ERR_PTR(err);
}
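clk_prepare_enable() is the stock helper that folds the two calls and their
error unwinding into one; roughly, per linux/clk.h:

static inline int clk_prepare_enable(struct clk *clk)
{
	int ret;

	ret = clk_prepare(clk);		/* may sleep */
	if (ret)
		return ret;
	ret = clk_enable(clk);		/* safe in atomic context */
	if (ret)
		clk_unprepare(clk);	/* undo on failure */
	return ret;
}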
static int __cpuinit twd_timer_setup(struct clock_event_device *clk)
{
struct clock_event_device **this_cpu_clk;
+	int cpu = smp_processor_id();
+
+	/*
+	 * If the basic setup for this CPU has been done before, don't
+	 * bother with the below.
+	 */
+	if (per_cpu(percpu_setup_called, cpu)) {
+		__raw_writel(0, twd_base + TWD_TIMER_CONTROL);
+		clockevents_register_device(*__this_cpu_ptr(twd_evt));
+		enable_percpu_irq(clk->irq, 0);
+		return 0;
+	}
+	per_cpu(percpu_setup_called, cpu) = true;
-	if (!twd_clk)
+	/*
+	 * This stuff only needs to be done once for the entire TWD cluster
+	 * during the runtime of the system.
+	 */
+	if (!common_setup_called) {
		twd_clk = twd_get_clock();
-	if (!IS_ERR_OR_NULL(twd_clk))
-		twd_timer_rate = clk_get_rate(twd_clk);
-	else
-		twd_calibrate_rate();
+		/*
+		 * We use IS_ERR_OR_NULL() here because, if the clock stubs
+		 * are active, we will get a valid clk reference which is
+		 * however NULL and will return the rate 0. In that case we
+		 * need to calibrate the rate instead.
+		 */
+		if (!IS_ERR_OR_NULL(twd_clk))
+			twd_timer_rate = clk_get_rate(twd_clk);
+		else
+			twd_calibrate_rate();
+
+		common_setup_called = true;
+	}
+	/*
+	 * The following is done once per CPU the first time .setup() is
+	 * called.
+	 */
__raw_writel(0, twd_base + TWD_TIMER_CONTROL);
clk->name = "local_timer";