* 'softirq-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
softirqs: Make wakeup_softirqd static
* 'x86-debug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
x86, asm: Restore parentheses around one pushl_cfi argument
x86, asm: Fix ancient-GAS workaround
x86, asm: Fix CFI macro invocations to deal with shortcomings in gas
* 'x86-numa-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
x86, numa: Assign CPUs to nodes in round-robin manner on fake NUMA
* 'x86-quirks-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
x86: HPET force enable for CX700 / VIA Epia LT
* 'x86-setup-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86, setup: Use string copy operation to optimize copy in kernel compression
* 'x86-uv-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
x86, UV: Use allocated buffer in tlb_uv.c:tunables_read()
* 'x86-vm86-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
x86, vm86: Fix preemption bug for int1 debug and int3 breakpoint handlers.
BUILD_INTERRUPT(irq_move_cleanup_interrupt,IRQ_MOVE_CLEANUP_VECTOR)
BUILD_INTERRUPT(reboot_interrupt,REBOOT_VECTOR)
-- -----BUILD_INTERRUPT3(invalidate_interrupt0,INVALIDATE_TLB_VECTOR_START+0,
-- ----- smp_invalidate_interrupt)
-- -----BUILD_INTERRUPT3(invalidate_interrupt1,INVALIDATE_TLB_VECTOR_START+1,
-- ----- smp_invalidate_interrupt)
-- -----BUILD_INTERRUPT3(invalidate_interrupt2,INVALIDATE_TLB_VECTOR_START+2,
-- ----- smp_invalidate_interrupt)
-- -----BUILD_INTERRUPT3(invalidate_interrupt3,INVALIDATE_TLB_VECTOR_START+3,
-- ----- smp_invalidate_interrupt)
-- -----BUILD_INTERRUPT3(invalidate_interrupt4,INVALIDATE_TLB_VECTOR_START+4,
-- ----- smp_invalidate_interrupt)
-- -----BUILD_INTERRUPT3(invalidate_interrupt5,INVALIDATE_TLB_VECTOR_START+5,
-- ----- smp_invalidate_interrupt)
-- -----BUILD_INTERRUPT3(invalidate_interrupt6,INVALIDATE_TLB_VECTOR_START+6,
-- ----- smp_invalidate_interrupt)
-- -----BUILD_INTERRUPT3(invalidate_interrupt7,INVALIDATE_TLB_VECTOR_START+7,
++ +++++.irpc idx, "01234567"
++ +++++BUILD_INTERRUPT3(invalidate_interrupt\idx,
++ +++++ (INVALIDATE_TLB_VECTOR_START)+\idx,
smp_invalidate_interrupt)
++ +++++.endr
#endif
BUILD_INTERRUPT(x86_platform_ipi, X86_PLATFORM_IPI_VECTOR)
BUILD_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR)
BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR)
-------#ifdef CONFIG_PERF_EVENTS
-------BUILD_INTERRUPT(perf_pending_interrupt, LOCAL_PENDING_VECTOR)
+++++++#ifdef CONFIG_IRQ_WORK
+++++++BUILD_INTERRUPT(irq_work_interrupt, IRQ_WORK_VECTOR)
#endif
#ifdef CONFIG_X86_THERMAL_VECTOR
misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID;
wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
c->cpuid_level = cpuid_eax(0);
++ + ++ get_cpu_cap(c);
}
}
{
#ifdef CONFIG_SMP
/* calling is from identify_secondary_cpu() ? */
------- if (c->cpu_index == boot_cpu_id)
+++++++ if (!c->cpu_index)
return;
/*
/* Don't do the funky fallback heuristics the AMD version employs
for now. */
node = apicid_to_node[apicid];
--- ---- if (node == NUMA_NO_NODE)
--- ---- node = first_node(node_online_map);
--- ---- else if (!node_online(node)) {
+++ ++++ if (node == NUMA_NO_NODE || !node_online(node)) {
/* reuse the value from init_cpu_to_node() */
node = cpu_to_node(cpu);
}
.macro FAKE_STACK_FRAME child_rip
/* push in order ss, rsp, eflags, cs, rip */
xorl %eax, %eax
- ----- pushq $__KERNEL_DS /* ss */
- ----- CFI_ADJUST_CFA_OFFSET 8
+ +++++ pushq_cfi $__KERNEL_DS /* ss */
/*CFI_REL_OFFSET ss,0*/
- ----- pushq %rax /* rsp */
- ----- CFI_ADJUST_CFA_OFFSET 8
+ +++++ pushq_cfi %rax /* rsp */
CFI_REL_OFFSET rsp,0
- ----- pushq $X86_EFLAGS_IF /* eflags - interrupts on */
- ----- CFI_ADJUST_CFA_OFFSET 8
+ +++++ pushq_cfi $X86_EFLAGS_IF /* eflags - interrupts on */
/*CFI_REL_OFFSET rflags,0*/
- ----- pushq $__KERNEL_CS /* cs */
- ----- CFI_ADJUST_CFA_OFFSET 8
+ +++++ pushq_cfi $__KERNEL_CS /* cs */
/*CFI_REL_OFFSET cs,0*/
- ----- pushq \child_rip /* rip */
- ----- CFI_ADJUST_CFA_OFFSET 8
+ +++++ pushq_cfi \child_rip /* rip */
CFI_REL_OFFSET rip,0
- ----- pushq %rax /* orig rax */
- ----- CFI_ADJUST_CFA_OFFSET 8
+ +++++ pushq_cfi %rax /* orig rax */
.endm
.macro UNFAKE_STACK_FRAME
LOCK ; btr $TIF_FORK,TI_flags(%r8)
- ----- push kernel_eflags(%rip)
- ----- CFI_ADJUST_CFA_OFFSET 8
- ----- popf # reset kernel eflags
- ----- CFI_ADJUST_CFA_OFFSET -8
+ +++++ pushq_cfi kernel_eflags(%rip)
+ +++++ popfq_cfi # reset kernel eflags
call schedule_tail # rdi: 'prev' task parameter
jnc sysret_signal
TRACE_IRQS_ON
ENABLE_INTERRUPTS(CLBR_NONE)
- ----- pushq %rdi
- ----- CFI_ADJUST_CFA_OFFSET 8
+ +++++ pushq_cfi %rdi
call schedule
- ----- popq %rdi
- ----- CFI_ADJUST_CFA_OFFSET -8
+ +++++ popq_cfi %rdi
jmp sysret_check
/* Handle a signal */
jnc int_very_careful
TRACE_IRQS_ON
ENABLE_INTERRUPTS(CLBR_NONE)
- ----- pushq %rdi
- ----- CFI_ADJUST_CFA_OFFSET 8
+ +++++ pushq_cfi %rdi
call schedule
- ----- popq %rdi
- ----- CFI_ADJUST_CFA_OFFSET -8
+ +++++ popq_cfi %rdi
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF
jmp int_with_check
/* Check for syscall exit trace */
testl $_TIF_WORK_SYSCALL_EXIT,%edx
jz int_signal
- ----- pushq %rdi
- ----- CFI_ADJUST_CFA_OFFSET 8
+ +++++ pushq_cfi %rdi
leaq 8(%rsp),%rdi # &ptregs -> arg1
call syscall_trace_leave
- ----- popq %rdi
- ----- CFI_ADJUST_CFA_OFFSET -8
+ +++++ popq_cfi %rdi
andl $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU),%edi
jmp int_restore_rest
ENTRY(stub_execve)
CFI_STARTPROC
- ----- popq %r11
- ----- CFI_ADJUST_CFA_OFFSET -8
- ----- CFI_REGISTER rip, r11
+ +++++ addq $8, %rsp
+ +++++ PARTIAL_FRAME 0
SAVE_REST
FIXUP_TOP_OF_STACK %r11
movq %rsp, %rcx
ENTRY(stub_rt_sigreturn)
CFI_STARTPROC
addq $8, %rsp
- ----- CFI_ADJUST_CFA_OFFSET -8
+ +++++ PARTIAL_FRAME 0
SAVE_REST
movq %rsp,%rdi
FIXUP_TOP_OF_STACK %r11
.if vector <> FIRST_EXTERNAL_VECTOR
CFI_ADJUST_CFA_OFFSET -8
.endif
- -----1: pushq $(~vector+0x80) /* Note: always in signed byte range */
- ----- CFI_ADJUST_CFA_OFFSET 8
+ +++++1: pushq_cfi $(~vector+0x80) /* Note: always in signed byte range */
.if ((vector-FIRST_EXTERNAL_VECTOR)%7) <> 6
jmp 2f
.endif
/* 0(%rsp): ~(interrupt number) */
.macro interrupt func
- ----- subq $10*8, %rsp
- ----- CFI_ADJUST_CFA_OFFSET 10*8
+ +++++ subq $ORIG_RAX-ARGOFFSET+8, %rsp
+ +++++ CFI_ADJUST_CFA_OFFSET ORIG_RAX-ARGOFFSET+8
call save_args
PARTIAL_FRAME 0
call \func
TRACE_IRQS_OFF
decl PER_CPU_VAR(irq_count)
leaveq
+ +++++ CFI_RESTORE rbp
CFI_DEF_CFA_REGISTER rsp
CFI_ADJUST_CFA_OFFSET -8
exit_intr:
jnc retint_signal
TRACE_IRQS_ON
ENABLE_INTERRUPTS(CLBR_NONE)
- ----- pushq %rdi
- ----- CFI_ADJUST_CFA_OFFSET 8
+ +++++ pushq_cfi %rdi
call schedule
- ----- popq %rdi
- ----- CFI_ADJUST_CFA_OFFSET -8
+ +++++ popq_cfi %rdi
GET_THREAD_INFO(%rcx)
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF
.macro apicinterrupt num sym do_sym
ENTRY(\sym)
INTR_FRAME
- ----- pushq $~(\num)
- ----- CFI_ADJUST_CFA_OFFSET 8
+ +++++ pushq_cfi $~(\num)
interrupt \do_sym
jmp ret_from_intr
CFI_ENDPROC
x86_platform_ipi smp_x86_platform_ipi
#ifdef CONFIG_SMP
-- -----apicinterrupt INVALIDATE_TLB_VECTOR_START+0 \
-- ----- invalidate_interrupt0 smp_invalidate_interrupt
-- -----apicinterrupt INVALIDATE_TLB_VECTOR_START+1 \
-- ----- invalidate_interrupt1 smp_invalidate_interrupt
-- -----apicinterrupt INVALIDATE_TLB_VECTOR_START+2 \
-- ----- invalidate_interrupt2 smp_invalidate_interrupt
-- -----apicinterrupt INVALIDATE_TLB_VECTOR_START+3 \
-- ----- invalidate_interrupt3 smp_invalidate_interrupt
-- -----apicinterrupt INVALIDATE_TLB_VECTOR_START+4 \
-- ----- invalidate_interrupt4 smp_invalidate_interrupt
-- -----apicinterrupt INVALIDATE_TLB_VECTOR_START+5 \
-- ----- invalidate_interrupt5 smp_invalidate_interrupt
-- -----apicinterrupt INVALIDATE_TLB_VECTOR_START+6 \
-- ----- invalidate_interrupt6 smp_invalidate_interrupt
-- -----apicinterrupt INVALIDATE_TLB_VECTOR_START+7 \
-- ----- invalidate_interrupt7 smp_invalidate_interrupt
++ +++++.irpc idx, "01234567"
++ +++++apicinterrupt (INVALIDATE_TLB_VECTOR_START)+\idx \
++ +++++ invalidate_interrupt\idx smp_invalidate_interrupt
++ +++++.endr
#endif
apicinterrupt THRESHOLD_APIC_VECTOR \
apicinterrupt SPURIOUS_APIC_VECTOR \
spurious_interrupt smp_spurious_interrupt
-------#ifdef CONFIG_PERF_EVENTS
-------apicinterrupt LOCAL_PENDING_VECTOR \
------- perf_pending_interrupt smp_perf_pending_interrupt
+++++++#ifdef CONFIG_IRQ_WORK
+++++++apicinterrupt IRQ_WORK_VECTOR \
+++++++ irq_work_interrupt smp_irq_work_interrupt
#endif
/*
INTR_FRAME
PARAVIRT_ADJUST_EXCEPTION_FRAME
pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */
- ----- subq $15*8,%rsp
- ----- CFI_ADJUST_CFA_OFFSET 15*8
+ +++++ subq $ORIG_RAX-R15, %rsp
+ +++++ CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
call error_entry
DEFAULT_FRAME 0
movq %rsp,%rdi /* pt_regs pointer */
ENTRY(\sym)
INTR_FRAME
PARAVIRT_ADJUST_EXCEPTION_FRAME
- ----- pushq $-1 /* ORIG_RAX: no syscall to restart */
- ----- CFI_ADJUST_CFA_OFFSET 8
- ----- subq $15*8, %rsp
+ +++++ pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */
+ +++++ subq $ORIG_RAX-R15, %rsp
+ +++++ CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
call save_paranoid
TRACE_IRQS_OFF
movq %rsp,%rdi /* pt_regs pointer */
ENTRY(\sym)
INTR_FRAME
PARAVIRT_ADJUST_EXCEPTION_FRAME
- ----- pushq $-1 /* ORIG_RAX: no syscall to restart */
- ----- CFI_ADJUST_CFA_OFFSET 8
- ----- subq $15*8, %rsp
+ +++++ pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */
+ +++++ subq $ORIG_RAX-R15, %rsp
+ +++++ CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
call save_paranoid
TRACE_IRQS_OFF
movq %rsp,%rdi /* pt_regs pointer */
ENTRY(\sym)
XCPT_FRAME
PARAVIRT_ADJUST_EXCEPTION_FRAME
- ----- subq $15*8,%rsp
- ----- CFI_ADJUST_CFA_OFFSET 15*8
+ +++++ subq $ORIG_RAX-R15, %rsp
+ +++++ CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
call error_entry
DEFAULT_FRAME 0
movq %rsp,%rdi /* pt_regs pointer */
ENTRY(\sym)
XCPT_FRAME
PARAVIRT_ADJUST_EXCEPTION_FRAME
- ----- subq $15*8,%rsp
- ----- CFI_ADJUST_CFA_OFFSET 15*8
+ +++++ subq $ORIG_RAX-R15, %rsp
+ +++++ CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
call save_paranoid
DEFAULT_FRAME 0
TRACE_IRQS_OFF
/* edi: new selector */
ENTRY(native_load_gs_index)
CFI_STARTPROC
- ----- pushf
- ----- CFI_ADJUST_CFA_OFFSET 8
+ +++++ pushfq_cfi
DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
SWAPGS
gs_change:
movl %edi,%gs
2: mfence /* workaround */
SWAPGS
- ----- popf
- ----- CFI_ADJUST_CFA_OFFSET -8
+ +++++ popfq_cfi
ret
CFI_ENDPROC
END(native_load_gs_index)
/* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(call_softirq)
CFI_STARTPROC
- ----- push %rbp
- ----- CFI_ADJUST_CFA_OFFSET 8
+ +++++ pushq_cfi %rbp
CFI_REL_OFFSET rbp,0
mov %rsp,%rbp
CFI_DEF_CFA_REGISTER rbp
push %rbp # backlink for old unwinder
call __do_softirq
leaveq
+ +++++ CFI_RESTORE rbp
CFI_DEF_CFA_REGISTER rsp
CFI_ADJUST_CFA_OFFSET -8
decl PER_CPU_VAR(irq_count)
/* ebx: no swapgs flag */
ENTRY(paranoid_exit)
- ----- INTR_FRAME
+ +++++ DEFAULT_FRAME
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF
testl %ebx,%ebx /* swapgs needed? */
error_sti:
TRACE_IRQS_OFF
ret
- ----- CFI_ENDPROC
/*
* There are two places in the kernel that can potentially fault with
/* Fix truncated RIP */
movq %rcx,RIP+8(%rsp)
jmp error_swapgs
+ +++++ CFI_ENDPROC
END(error_entry)
INTR_FRAME
PARAVIRT_ADJUST_EXCEPTION_FRAME
pushq_cfi $-1
- ----- subq $15*8, %rsp
- ----- CFI_ADJUST_CFA_OFFSET 15*8
+ +++++ subq $ORIG_RAX-R15, %rsp
+ +++++ CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
call save_paranoid
DEFAULT_FRAME 0
/* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
static ssize_t tunables_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
------ - char buf[300];
++++++ + char *buf;
int ret;
------ - ret = snprintf(buf, 300, "%s %s %s\n%d %d %d %d %d %d %d %d %d\n",
++++++ + buf = kasprintf(GFP_KERNEL, "%s %s %s\n%d %d %d %d %d %d %d %d %d\n",
"max_bau_concurrent plugged_delay plugsb4reset",
"timeoutsb4reset ipi_reset_limit complete_threshold",
"congested_response_us congested_reps congested_period",
timeoutsb4reset, ipi_reset_limit, complete_threshold,
congested_response_us, congested_reps, congested_period);
------ - return simple_read_from_buffer(userbuf, count, ppos, buf, ret);
++++++ + if (!buf)
++++++ + return -ENOMEM;
++++++ +
++++++ + ret = simple_read_from_buffer(userbuf, count, ppos, buf, strlen(buf));
++++++ + kfree(buf);
++++++ + return ret;
}
/*
.open = tunables_open,
.read = tunables_read,
.write = tunables_write,
+++++++ .llseek = default_llseek,
};
static int __init uv_ptc_init(void)
if (regs->flags & X86_VM_MASK) {
handle_vm86_trap((struct kernel_vm86_regs *) regs,
error_code, 1);
+++++++ preempt_conditional_cli(regs);
return;
}
}
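For context, the imbalance this hunk fixes is easier to see in a condensed handler. A sketch under stated assumptions (example_trap_handler is a made-up name; preempt_conditional_sti()/preempt_conditional_cli() and handle_vm86_trap() are the existing helpers used by the int1/int3 handlers):

dotraplinkage void example_trap_handler(struct pt_regs *regs, long error_code)
{
	preempt_conditional_sti(regs);	/* raises preempt count, may enable irqs */

	if (regs->flags & X86_VM_MASK) {
		handle_vm86_trap((struct kernel_vm86_regs *) regs,
				error_code, 1);
		preempt_conditional_cli(regs);	/* the balancing call added above */
		return;
	}

	/* ... non-vm86 handling ... */

	preempt_conditional_cli(regs);
}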
EXPORT_SYMBOL_GPL(math_state_restore);
-------#ifndef CONFIG_MATH_EMULATION
-------void math_emulate(struct math_emu_info *info)
-------{
------- printk(KERN_EMERG
------- "math-emulation not enabled and no coprocessor found.\n");
------- printk(KERN_EMERG "killing %s.\n", current->comm);
------- force_sig(SIGFPE, current);
------- schedule();
-------}
-------#endif /* CONFIG_MATH_EMULATION */
-------
dotraplinkage void __kprobes
do_device_not_available(struct pt_regs *regs, long error_code)
{
-------#ifdef CONFIG_X86_32
+++++++#ifdef CONFIG_MATH_EMULATION
if (read_cr0() & X86_CR0_EM) {
struct math_emu_info info = { };
info.regs = regs;
math_emulate(&info);
------- } else {
------- math_state_restore(); /* interrupts still off */
------- conditional_sti(regs);
+++++++ return;
}
-------#else
------- math_state_restore();
+++++++#endif
+++++++ math_state_restore(); /* interrupts still off */
+++++++#ifdef CONFIG_X86_32
+++++++ conditional_sti(regs);
#endif
}
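The interleaved +/- markers above make the resulting function hard to read at a glance; reassembled from only the lines visible in this hunk (any context lines the diff elides are omitted), the new do_device_not_available() is roughly:

dotraplinkage void __kprobes
do_device_not_available(struct pt_regs *regs, long error_code)
{
#ifdef CONFIG_MATH_EMULATION
	if (read_cr0() & X86_CR0_EM) {
		struct math_emu_info info = { };

		info.regs = regs;
		math_emulate(&info);
		return;
	}
#endif
	math_state_restore(); /* interrupts still off */
#ifdef CONFIG_X86_32
	conditional_sti(regs);
#endif
}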
#endif
#ifdef CONFIG_X86_32
------- if (cpu_has_fxsr) {
------- printk(KERN_INFO "Enabling fast FPU save and restore... ");
------- set_in_cr4(X86_CR4_OSFXSR);
------- printk("done.\n");
------- }
------- if (cpu_has_xmm) {
------- printk(KERN_INFO
------- "Enabling unmasked SIMD FPU exception support... ");
------- set_in_cr4(X86_CR4_OSXMMEXCPT);
------- printk("done.\n");
------- }
-------
set_system_trap_gate(SYSCALL_VECTOR, &system_call);
set_bit(SYSCALL_VECTOR, used_vectors);
#endif
#include <asm/atomic.h>
#include <asm/ptrace.h>
#include <asm/system.h>
+++++++#include <trace/events/irq.h>
/*
* These correspond to the IORESOURCE_IRQ_* defines in
asmlinkage void __do_softirq(void);
extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
-------#define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
+++++++static inline void __raise_softirq_irqoff(unsigned int nr)
+++++++{
+++++++ trace_softirq_raise((struct softirq_action *)(unsigned long)nr, NULL);
+++++++ or_softirq_pending(1UL << nr);
+++++++}
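As a usage sketch (assumption: example_raise_net_rx() is made up for illustration; NET_RX_SOFTIRQ and local_irq_save()/local_irq_restore() are the usual kernel symbols), a caller that already holds interrupts off now gets the softirq_raise tracepoint along with the pending bit:

static void example_raise_net_rx(void)
{
	unsigned long flags;

	local_irq_save(flags);
	/* sets the NET_RX pending bit and emits trace_softirq_raise() */
	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
	local_irq_restore(flags);
}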
+++++++
extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);
- ------extern void wakeup_softirqd(void);
/* This is the worklist that queues up per-cpu softirq work.
*
struct seq_file;
int show_interrupts(struct seq_file *p, void *v);
-------struct irq_desc;
-------
extern int early_irq_init(void);
extern int arch_probe_nr_irqs(void);
extern int arch_early_irq_init(void);
-------extern int arch_init_chip_data(struct irq_desc *desc, int node);
#endif
* to the pending events, so lets the scheduler to balance
* the softirq load for us.
*/
- ------void wakeup_softirqd(void)
+ ++++++static void wakeup_softirqd(void)
{
/* Interrupts are disabled: no need to stop preemption */
struct task_struct *tsk = __get_cpu_var(ksoftirqd);
}
/*
+++++++ * preempt_count and SOFTIRQ_OFFSET usage:
+++++++ * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
+++++++ * softirq processing.
+++++++ * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
+++++++ * on local_bh_disable or local_bh_enable.
+++++++ * This lets us distinguish between whether we are currently processing
+++++++ * softirq and whether we just have bh disabled.
+++++++ */
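A minimal sketch of the distinction described above (assumption: example_in_serving_softirq() is illustrative only, not part of this patch; softirq_count() and SOFTIRQ_OFFSET are the existing helpers):

static inline bool example_in_serving_softirq(void)
{
	/*
	 * local_bh_disable() adds SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET),
	 * while entering softirq processing adds SOFTIRQ_OFFSET, so the low
	 * softirq bit of preempt_count is set only while a handler runs.
	 */
	return softirq_count() & SOFTIRQ_OFFSET;
}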
+++++++
+++++++/*
* This one is for softirq.c-internal use,
* where hardirqs are disabled legitimately:
*/
#ifdef CONFIG_TRACE_IRQFLAGS
-------static void __local_bh_disable(unsigned long ip)
+++++++static void __local_bh_disable(unsigned long ip, unsigned int cnt)
{
unsigned long flags;
* We must manually increment preempt_count here and manually
* call the trace_preempt_off later.
*/
------- preempt_count() += SOFTIRQ_OFFSET;
+++++++ preempt_count() += cnt;
/*
* Were softirqs turned off above:
*/
------- if (softirq_count() == SOFTIRQ_OFFSET)
+++++++ if (softirq_count() == cnt)
trace_softirqs_off(ip);
raw_local_irq_restore(flags);
------- if (preempt_count() == SOFTIRQ_OFFSET)
+++++++ if (preempt_count() == cnt)
trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
}
#else /* !CONFIG_TRACE_IRQFLAGS */
-------static inline void __local_bh_disable(unsigned long ip)
+++++++static inline void __local_bh_disable(unsigned long ip, unsigned int cnt)
{
------- add_preempt_count(SOFTIRQ_OFFSET);
+++++++ add_preempt_count(cnt);
barrier();
}
#endif /* CONFIG_TRACE_IRQFLAGS */
void local_bh_disable(void)
{
------- __local_bh_disable((unsigned long)__builtin_return_address(0));
+++++++ __local_bh_disable((unsigned long)__builtin_return_address(0),
+++++++ SOFTIRQ_DISABLE_OFFSET);
}
EXPORT_SYMBOL(local_bh_disable);
+++++++static void __local_bh_enable(unsigned int cnt)
+++++++{
+++++++ WARN_ON_ONCE(in_irq());
+++++++ WARN_ON_ONCE(!irqs_disabled());
+++++++
+++++++ if (softirq_count() == cnt)
+++++++ trace_softirqs_on((unsigned long)__builtin_return_address(0));
+++++++ sub_preempt_count(cnt);
+++++++}
+++++++
/*
* Special-case - softirqs can safely be enabled in
* cond_resched_softirq(), or by __do_softirq(),
*/
void _local_bh_enable(void)
{
------- WARN_ON_ONCE(in_irq());
------- WARN_ON_ONCE(!irqs_disabled());
-------
------- if (softirq_count() == SOFTIRQ_OFFSET)
------- trace_softirqs_on((unsigned long)__builtin_return_address(0));
------- sub_preempt_count(SOFTIRQ_OFFSET);
+++++++ __local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}
EXPORT_SYMBOL(_local_bh_enable);
/*
* Are softirqs going to be turned on now:
*/
------- if (softirq_count() == SOFTIRQ_OFFSET)
+++++++ if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
trace_softirqs_on(ip);
/*
* Keep preemption disabled until we are done with
* softirq processing:
*/
------- sub_preempt_count(SOFTIRQ_OFFSET - 1);
+++++++ sub_preempt_count(SOFTIRQ_DISABLE_OFFSET - 1);
if (unlikely(!in_interrupt() && local_softirq_pending()))
do_softirq();
pending = local_softirq_pending();
account_system_vtime(current);
------- __local_bh_disable((unsigned long)__builtin_return_address(0));
+++++++ __local_bh_disable((unsigned long)__builtin_return_address(0),
+++++++ SOFTIRQ_OFFSET);
lockdep_softirq_enter();
cpu = smp_processor_id();
lockdep_softirq_exit();
account_system_vtime(current);
------- _local_bh_enable();
+++++++ __local_bh_enable(SOFTIRQ_OFFSET);
}
#ifndef __ARCH_HAS_DO_SOFTIRQ
rcu_irq_enter();
if (idle_cpu(cpu) && !in_interrupt()) {
------- __irq_enter();
+++++++ /*
+++++++ * Prevent raise_softirq from needlessly waking up ksoftirqd
+++++++ * here, as softirq will be serviced on return from interrupt.
+++++++ */
+++++++ local_bh_disable();
tick_check_idle(cpu);
------- } else
------- __irq_enter();
+++++++ _local_bh_enable();
+++++++ }
+++++++
+++++++ __irq_enter();
}
#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
{
set_current_state(TASK_INTERRUPTIBLE);
+++++++ current->flags |= PF_KSOFTIRQD;
while (!kthread_should_stop()) {
preempt_disable();
if (!local_softirq_pending()) {
return 0;
}
+++++++#ifdef CONFIG_GENERIC_HARDIRQS
int __init __weak arch_probe_nr_irqs(void)
{
------- return 0;
+++++++ return NR_IRQS_LEGACY;
}
int __init __weak arch_early_irq_init(void)
{
return 0;
}
-------
-------int __weak arch_init_chip_data(struct irq_desc *desc, int node)
-------{
------- return 0;
-------}
+++++++#endif