Merge branches 'softirq-for-linus', 'x86-debug-for-linus', 'x86-numa-for-linus', 'x86-quirks-for-linus', 'x86-setup-for-linus', 'x86-uv-for-linus' and 'x86-vm86-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
author    Linus Torvalds <torvalds@linux-foundation.org>
          Sat, 23 Oct 2010 15:25:36 +0000 (08:25 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Sat, 23 Oct 2010 15:25:36 +0000 (08:25 -0700)
* 'softirq-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  softirqs: Make wakeup_softirqd static

* 'x86-debug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86, asm: Restore parentheses around one pushl_cfi argument
  x86, asm: Fix ancient-GAS workaround
  x86, asm: Fix CFI macro invocations to deal with shortcomings in gas

* 'x86-numa-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86, numa: Assign CPUs to nodes in round-robin manner on fake NUMA

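The round-robin assignment named above amounts to giving CPU n the emulated node n % node_count instead of packing every CPU onto node 0. A minimal userspace sketch of that arithmetic, with hypothetical cpu_count/node_count values (illustrative only, not the kernel patch):

    #include <stdio.h>

    int main(void)
    {
            const int cpu_count  = 8;   /* hypothetical CPU count */
            const int node_count = 4;   /* hypothetical fake-NUMA node count */
            int cpu;

            /* Round-robin: CPU n lands on emulated node (n % node_count). */
            for (cpu = 0; cpu < cpu_count; cpu++)
                    printf("cpu %d -> node %d\n", cpu, cpu % node_count);
            return 0;
    }
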
* 'x86-quirks-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86: HPET force enable for CX700 / VIA Epia LT

* 'x86-setup-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86, setup: Use string copy operation to optimize copy in kernel compression

* 'x86-uv-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86, UV: Use allocated buffer in tlb_uv.c:tunables_read()

* 'x86-vm86-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86, vm86: Fix preemption bug for int1 debug and int3 breakpoint handlers.

arch/x86/include/asm/entry_arch.h
arch/x86/kernel/cpu/intel.c
arch/x86/kernel/entry_64.S
arch/x86/kernel/tlb_uv.c
arch/x86/kernel/traps.c
include/linux/interrupt.h
kernel/softirq.c

diff --combined arch/x86/include/asm/entry_arch.h
@@@@@@@@@ -16,22 -16,22 -16,11 -16,22 -16,22 -16,22 -16,22 -16,22 +16,11 @@@@@@@@@ BUILD_INTERRUPT(call_function_single_in
        BUILD_INTERRUPT(irq_move_cleanup_interrupt,IRQ_MOVE_CLEANUP_VECTOR)
        BUILD_INTERRUPT(reboot_interrupt,REBOOT_VECTOR)
        
-- -----BUILD_INTERRUPT3(invalidate_interrupt0,INVALIDATE_TLB_VECTOR_START+0,
-- -----                 smp_invalidate_interrupt)
-- -----BUILD_INTERRUPT3(invalidate_interrupt1,INVALIDATE_TLB_VECTOR_START+1,
-- -----                 smp_invalidate_interrupt)
-- -----BUILD_INTERRUPT3(invalidate_interrupt2,INVALIDATE_TLB_VECTOR_START+2,
-- -----                 smp_invalidate_interrupt)
-- -----BUILD_INTERRUPT3(invalidate_interrupt3,INVALIDATE_TLB_VECTOR_START+3,
-- -----                 smp_invalidate_interrupt)
-- -----BUILD_INTERRUPT3(invalidate_interrupt4,INVALIDATE_TLB_VECTOR_START+4,
-- -----                 smp_invalidate_interrupt)
-- -----BUILD_INTERRUPT3(invalidate_interrupt5,INVALIDATE_TLB_VECTOR_START+5,
-- -----                 smp_invalidate_interrupt)
-- -----BUILD_INTERRUPT3(invalidate_interrupt6,INVALIDATE_TLB_VECTOR_START+6,
-- -----                 smp_invalidate_interrupt)
-- -----BUILD_INTERRUPT3(invalidate_interrupt7,INVALIDATE_TLB_VECTOR_START+7,
++ +++++.irpc idx, "01234567"
++ +++++BUILD_INTERRUPT3(invalidate_interrupt\idx,
++ +++++                 (INVALIDATE_TLB_VECTOR_START)+\idx,
                         smp_invalidate_interrupt)
++ +++++.endr
        #endif
        
        BUILD_INTERRUPT(x86_platform_ipi, X86_PLATFORM_IPI_VECTOR)
@@@@@@@@@ -49,8 -49,8 -38,8 -49,8 -49,8 -49,8 -49,8 -49,8 +38,8 @@@@@@@@@ BUILD_INTERRUPT(apic_timer_interrupt,LO
        BUILD_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR)
        BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR)
        
 -------#ifdef CONFIG_PERF_EVENTS
 -------BUILD_INTERRUPT(perf_pending_interrupt, LOCAL_PENDING_VECTOR)
 +++++++#ifdef CONFIG_IRQ_WORK
 +++++++BUILD_INTERRUPT(irq_work_interrupt, IRQ_WORK_VECTOR)
        #endif
        
        #ifdef CONFIG_X86_THERMAL_VECTOR
diff --combined arch/x86/kernel/cpu/intel.c
@@@@@@@@@ -39,7 -39,6 -39,6 -39,7 -39,6 -39,7 -39,6 -39,6 +39,7 @@@@@@@@@ static void __cpuinit early_init_intel(
                                misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID;
                                wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
                                c->cpuid_level = cpuid_eax(0);
 ++ + ++                        get_cpu_cap(c);
                        }
                }
        
@@@@@@@@@ -170,7 -169,7 -169,7 -170,7 -169,7 -170,7 -169,7 -169,7 +170,7 @@@@@@@@@ static void __cpuinit intel_smp_check(s
        {
        #ifdef CONFIG_SMP
                /* calling is from identify_secondary_cpu() ? */
 -------        if (c->cpu_index == boot_cpu_id)
 +++++++        if (!c->cpu_index)
                        return;
        
                /*
@@@@@@@@@ -284,9 -283,9 -283,9 -284,7 -283,9 -284,9 -283,9 -283,9 +284,7 @@@@@@@@@ static void __cpuinit srat_detect_node(
                /* Don't do the funky fallback heuristics the AMD version employs
                   for now. */
                node = apicid_to_node[apicid];
--- ----        if (node == NUMA_NO_NODE)
--- ----                node = first_node(node_online_map);
--- ----        else if (!node_online(node)) {
+++ ++++        if (node == NUMA_NO_NODE || !node_online(node)) {
                        /* reuse the value from init_cpu_to_node() */
                        node = cpu_to_node(cpu);
                }
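
In the early_init_intel() hunk above, clearing MSR_IA32_MISC_ENABLE_LIMIT_CPUID un-hides CPUID leaves, so c->cpuid_level is refreshed via cpuid_eax(0) and the added get_cpu_cap(c) call re-reads the feature bits that the limit had masked. A hedged userspace analogue of the leaf-0 query, using GCC's <cpuid.h> rather than the kernel's cpuid helpers:

    #include <stdio.h>
    #include <cpuid.h>

    int main(void)
    {
            unsigned int eax, ebx, ecx, edx;

            /* Leaf 0: EAX reports the highest supported standard CPUID leaf,
             * the same quantity the kernel caches in c->cpuid_level. */
            if (!__get_cpuid(0, &eax, &ebx, &ecx, &edx))
                    return 1;
            printf("max standard CPUID leaf: %u\n", eax);
            return 0;
    }
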
diff --combined arch/x86/kernel/entry_64.S
@@@@@@@@@ -213,17 -213,23 -213,17 -213,23 -213,23 -213,23 -213,23 -213,23 +213,17 @@@@@@@@@ ENDPROC(native_usergs_sysret64
                .macro FAKE_STACK_FRAME child_rip
                /* push in order ss, rsp, eflags, cs, rip */
                xorl %eax, %eax
 - -----        pushq $__KERNEL_DS /* ss */
 - -----        CFI_ADJUST_CFA_OFFSET   8
 + +++++        pushq_cfi $__KERNEL_DS /* ss */
                /*CFI_REL_OFFSET        ss,0*/
 - -----        pushq %rax /* rsp */
 - -----        CFI_ADJUST_CFA_OFFSET   8
 + +++++        pushq_cfi %rax /* rsp */
                CFI_REL_OFFSET  rsp,0
 - -----        pushq $X86_EFLAGS_IF /* eflags - interrupts on */
 - -----        CFI_ADJUST_CFA_OFFSET   8
 + +++++        pushq_cfi $X86_EFLAGS_IF /* eflags - interrupts on */
                /*CFI_REL_OFFSET        rflags,0*/
 - -----        pushq $__KERNEL_CS /* cs */
 - -----        CFI_ADJUST_CFA_OFFSET   8
 + +++++        pushq_cfi $__KERNEL_CS /* cs */
                /*CFI_REL_OFFSET        cs,0*/
 - -----        pushq \child_rip /* rip */
 - -----        CFI_ADJUST_CFA_OFFSET   8
 + +++++        pushq_cfi \child_rip /* rip */
                CFI_REL_OFFSET  rip,0
 - -----        pushq   %rax /* orig rax */
 - -----        CFI_ADJUST_CFA_OFFSET   8
 + +++++        pushq_cfi %rax /* orig rax */
                .endm
        
                .macro UNFAKE_STACK_FRAME
@@@@@@@@@ -392,8 -398,10 -392,8 -398,10 -398,10 -398,10 -398,10 -398,10 +392,8 @@@@@@@@@ ENTRY(ret_from_fork
        
                LOCK ; btr $TIF_FORK,TI_flags(%r8)
        
 - -----        push kernel_eflags(%rip)
 - -----        CFI_ADJUST_CFA_OFFSET 8
 - -----        popf                                    # reset kernel eflags
 - -----        CFI_ADJUST_CFA_OFFSET -8
 + +++++        pushq_cfi kernel_eflags(%rip)
 + +++++        popfq_cfi                               # reset kernel eflags
        
                call schedule_tail                      # rdi: 'prev' task parameter
        
@@@@@@@@@ -513,9 -521,11 -513,9 -521,11 -521,11 -521,11 -521,11 -521,11 +513,9 @@@@@@@@@ sysret_careful
                jnc sysret_signal
                TRACE_IRQS_ON
                ENABLE_INTERRUPTS(CLBR_NONE)
 - -----        pushq %rdi
 - -----        CFI_ADJUST_CFA_OFFSET 8
 + +++++        pushq_cfi %rdi
                call schedule
 - -----        popq  %rdi
 - -----        CFI_ADJUST_CFA_OFFSET -8
 + +++++        popq_cfi %rdi
                jmp sysret_check
        
                /* Handle a signal */
@@@@@@@@@ -624,9 -634,11 -624,9 -634,11 -634,11 -634,11 -634,11 -634,11 +624,9 @@@@@@@@@ int_careful
                jnc  int_very_careful
                TRACE_IRQS_ON
                ENABLE_INTERRUPTS(CLBR_NONE)
 - -----        pushq %rdi
 - -----        CFI_ADJUST_CFA_OFFSET 8
 + +++++        pushq_cfi %rdi
                call schedule
 - -----        popq %rdi
 - -----        CFI_ADJUST_CFA_OFFSET -8
 + +++++        popq_cfi %rdi
                DISABLE_INTERRUPTS(CLBR_NONE)
                TRACE_IRQS_OFF
                jmp int_with_check
@@@@@@@@@ -640,10 -652,12 -640,10 -652,12 -652,12 -652,12 -652,12 -652,12 +640,10 @@@@@@@@@ int_check_syscall_exit_work
                /* Check for syscall exit trace */
                testl $_TIF_WORK_SYSCALL_EXIT,%edx
                jz int_signal
 - -----        pushq %rdi
 - -----        CFI_ADJUST_CFA_OFFSET 8
 + +++++        pushq_cfi %rdi
                leaq 8(%rsp),%rdi       # &ptregs -> arg1
                call syscall_trace_leave
 - -----        popq %rdi
 - -----        CFI_ADJUST_CFA_OFFSET -8
 + +++++        popq_cfi %rdi
                andl $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU),%edi
                jmp int_restore_rest
        
@@@@@@@@@ -700,8 -714,9 -700,8 -714,9 -714,9 -714,9 -714,9 -714,9 +700,8 @@@@@@@@@ END(ptregscall_common
        
        ENTRY(stub_execve)
                CFI_STARTPROC
 - -----        popq %r11
 - -----        CFI_ADJUST_CFA_OFFSET -8
 - -----        CFI_REGISTER rip, r11
 + +++++        addq $8, %rsp
 + +++++        PARTIAL_FRAME 0
                SAVE_REST
                FIXUP_TOP_OF_STACK %r11
                movq %rsp, %rcx
@@@@@@@@@ -720,7 -735,7 -720,7 -735,7 -735,7 -735,7 -735,7 -735,7 +720,7 @@@@@@@@@ END(stub_execve
        ENTRY(stub_rt_sigreturn)
                CFI_STARTPROC
                addq $8, %rsp
 - -----        CFI_ADJUST_CFA_OFFSET   -8
 + +++++        PARTIAL_FRAME 0
                SAVE_REST
                movq %rsp,%rdi
                FIXUP_TOP_OF_STACK %r11
@@@@@@@@@ -751,7 -766,8 -751,7 -766,8 -766,8 -766,8 -766,8 -766,8 +751,7 @@@@@@@@@ vector=FIRST_EXTERNAL_VECTO
              .if vector <> FIRST_EXTERNAL_VECTOR
                CFI_ADJUST_CFA_OFFSET -8
              .endif
 - -----1:      pushq $(~vector+0x80)   /* Note: always in signed byte range */
 - -----        CFI_ADJUST_CFA_OFFSET 8
 + +++++1:      pushq_cfi $(~vector+0x80)       /* Note: always in signed byte range */
              .if ((vector-FIRST_EXTERNAL_VECTOR)%7) <> 6
                jmp 2f
              .endif
@@@@@@@@@ -780,8 -796,8 -780,8 -796,8 -796,8 -796,8 -796,8 -796,8 +780,8 @@@@@@@@@ END(interrupt
        
        /* 0(%rsp): ~(interrupt number) */
                .macro interrupt func
 - -----        subq $10*8, %rsp
 - -----        CFI_ADJUST_CFA_OFFSET 10*8
 + +++++        subq $ORIG_RAX-ARGOFFSET+8, %rsp
 + +++++        CFI_ADJUST_CFA_OFFSET ORIG_RAX-ARGOFFSET+8
                call save_args
                PARTIAL_FRAME 0
                call \func
@@@@@@@@@ -806,7 -822,6 -806,7 -822,6 -822,6 -822,6 -822,6 -822,6 +806,7 @@@@@@@@@ ret_from_intr
                TRACE_IRQS_OFF
                decl PER_CPU_VAR(irq_count)
                leaveq
 + +++++        CFI_RESTORE             rbp
                CFI_DEF_CFA_REGISTER    rsp
                CFI_ADJUST_CFA_OFFSET   -8
        exit_intr:
@@@@@@@@@ -888,9 -903,11 -888,9 -903,11 -903,11 -903,11 -903,11 -903,11 +888,9 @@@@@@@@@ retint_careful
                jnc   retint_signal
                TRACE_IRQS_ON
                ENABLE_INTERRUPTS(CLBR_NONE)
 - -----        pushq %rdi
 - -----        CFI_ADJUST_CFA_OFFSET   8
 + +++++        pushq_cfi %rdi
                call  schedule
 - -----        popq %rdi
 - -----        CFI_ADJUST_CFA_OFFSET   -8
 + +++++        popq_cfi %rdi
                GET_THREAD_INFO(%rcx)
                DISABLE_INTERRUPTS(CLBR_NONE)
                TRACE_IRQS_OFF
@@@@@@@@@ -939,7 -956,8 -939,7 -956,8 -956,8 -956,8 -956,8 -956,8 +939,7 @@@@@@@@@ END(common_interrupt
        .macro apicinterrupt num sym do_sym
        ENTRY(\sym)
                INTR_FRAME
 - -----        pushq $~(\num)
 - -----        CFI_ADJUST_CFA_OFFSET 8
 + +++++        pushq_cfi $~(\num)
                interrupt \do_sym
                jmp ret_from_intr
                CFI_ENDPROC
@@@@@@@@@ -963,22 -981,22 -963,10 -981,22 -981,22 -981,22 -981,22 -981,22 +963,10 @@@@@@@@@ apicinterrupt X86_PLATFORM_IPI_VECTOR 
                x86_platform_ipi smp_x86_platform_ipi
        
        #ifdef CONFIG_SMP
-- -----apicinterrupt INVALIDATE_TLB_VECTOR_START+0 \
-- -----        invalidate_interrupt0 smp_invalidate_interrupt
-- -----apicinterrupt INVALIDATE_TLB_VECTOR_START+1 \
-- -----        invalidate_interrupt1 smp_invalidate_interrupt
-- -----apicinterrupt INVALIDATE_TLB_VECTOR_START+2 \
-- -----        invalidate_interrupt2 smp_invalidate_interrupt
-- -----apicinterrupt INVALIDATE_TLB_VECTOR_START+3 \
-- -----        invalidate_interrupt3 smp_invalidate_interrupt
-- -----apicinterrupt INVALIDATE_TLB_VECTOR_START+4 \
-- -----        invalidate_interrupt4 smp_invalidate_interrupt
-- -----apicinterrupt INVALIDATE_TLB_VECTOR_START+5 \
-- -----        invalidate_interrupt5 smp_invalidate_interrupt
-- -----apicinterrupt INVALIDATE_TLB_VECTOR_START+6 \
-- -----        invalidate_interrupt6 smp_invalidate_interrupt
-- -----apicinterrupt INVALIDATE_TLB_VECTOR_START+7 \
-- -----        invalidate_interrupt7 smp_invalidate_interrupt
++ +++++.irpc idx, "01234567"
++ +++++apicinterrupt (INVALIDATE_TLB_VECTOR_START)+\idx \
++ +++++        invalidate_interrupt\idx smp_invalidate_interrupt
++ +++++.endr
        #endif
        
        apicinterrupt THRESHOLD_APIC_VECTOR \
@@@@@@@@@ -1005,9 -1023,9 -993,9 -1023,9 -1023,9 -1023,9 -1023,9 -1023,9 +993,9 @@@@@@@@@ apicinterrupt ERROR_APIC_VECTOR 
        apicinterrupt SPURIOUS_APIC_VECTOR \
                spurious_interrupt smp_spurious_interrupt
        
 -------#ifdef CONFIG_PERF_EVENTS
 -------apicinterrupt LOCAL_PENDING_VECTOR \
 -------        perf_pending_interrupt smp_perf_pending_interrupt
 +++++++#ifdef CONFIG_IRQ_WORK
 +++++++apicinterrupt IRQ_WORK_VECTOR \
 +++++++        irq_work_interrupt smp_irq_work_interrupt
        #endif
        
        /*
                INTR_FRAME
                PARAVIRT_ADJUST_EXCEPTION_FRAME
                pushq_cfi $-1           /* ORIG_RAX: no syscall to restart */
 - -----        subq $15*8,%rsp
 - -----        CFI_ADJUST_CFA_OFFSET 15*8
 + +++++        subq $ORIG_RAX-R15, %rsp
 + +++++        CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
                call error_entry
                DEFAULT_FRAME 0
                movq %rsp,%rdi          /* pt_regs pointer */
        ENTRY(\sym)
                INTR_FRAME
                PARAVIRT_ADJUST_EXCEPTION_FRAME
 - -----        pushq $-1               /* ORIG_RAX: no syscall to restart */
 - -----        CFI_ADJUST_CFA_OFFSET 8
 - -----        subq $15*8, %rsp
 + +++++        pushq_cfi $-1           /* ORIG_RAX: no syscall to restart */
 + +++++        subq $ORIG_RAX-R15, %rsp
 + +++++        CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
                call save_paranoid
                TRACE_IRQS_OFF
                movq %rsp,%rdi          /* pt_regs pointer */
        ENTRY(\sym)
                INTR_FRAME
                PARAVIRT_ADJUST_EXCEPTION_FRAME
 - -----        pushq $-1               /* ORIG_RAX: no syscall to restart */
 - -----        CFI_ADJUST_CFA_OFFSET 8
 - -----        subq $15*8, %rsp
 + +++++        pushq_cfi $-1           /* ORIG_RAX: no syscall to restart */
 + +++++        subq $ORIG_RAX-R15, %rsp
 + +++++        CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
                call save_paranoid
                TRACE_IRQS_OFF
                movq %rsp,%rdi          /* pt_regs pointer */
        ENTRY(\sym)
                XCPT_FRAME
                PARAVIRT_ADJUST_EXCEPTION_FRAME
 - -----        subq $15*8,%rsp
 - -----        CFI_ADJUST_CFA_OFFSET 15*8
 + +++++        subq $ORIG_RAX-R15, %rsp
 + +++++        CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
                call error_entry
                DEFAULT_FRAME 0
                movq %rsp,%rdi                  /* pt_regs pointer */
        ENTRY(\sym)
                XCPT_FRAME
                PARAVIRT_ADJUST_EXCEPTION_FRAME
 - -----        subq $15*8,%rsp
 - -----        CFI_ADJUST_CFA_OFFSET 15*8
 + +++++        subq $ORIG_RAX-R15, %rsp
 + +++++        CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
                call save_paranoid
                DEFAULT_FRAME 0
                TRACE_IRQS_OFF
@@@@@@@@@ -1121,14 -1139,16 -1109,14 -1139,16 -1139,16 -1139,16 -1139,16 -1139,16 +1109,14 @@@@@@@@@ zeroentry simd_coprocessor_error do_sim
                /* edi:  new selector */
        ENTRY(native_load_gs_index)
                CFI_STARTPROC
 - -----        pushf
 - -----        CFI_ADJUST_CFA_OFFSET 8
 + +++++        pushfq_cfi
                DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
                SWAPGS
        gs_change:
                movl %edi,%gs
        2:      mfence          /* workaround */
                SWAPGS
 - -----        popf
 - -----        CFI_ADJUST_CFA_OFFSET -8
 + +++++        popfq_cfi
                ret
                CFI_ENDPROC
        END(native_load_gs_index)
@@@@@@@@@ -1195,7 -1215,8 -1183,7 -1215,8 -1215,8 -1215,8 -1215,8 -1215,8 +1183,7 @@@@@@@@@ END(kernel_execve
        /* Call softirq on interrupt stack. Interrupts are off. */
        ENTRY(call_softirq)
                CFI_STARTPROC
 - -----        push %rbp
 - -----        CFI_ADJUST_CFA_OFFSET   8
 + +++++        pushq_cfi %rbp
                CFI_REL_OFFSET rbp,0
                mov  %rsp,%rbp
                CFI_DEF_CFA_REGISTER rbp
                push  %rbp                      # backlink for old unwinder
                call __do_softirq
                leaveq
 + +++++        CFI_RESTORE             rbp
                CFI_DEF_CFA_REGISTER    rsp
                CFI_ADJUST_CFA_OFFSET   -8
                decl PER_CPU_VAR(irq_count)
@@@@@@@@@ -1348,7 -1368,7 -1336,7 -1368,7 -1368,7 -1368,7 -1368,7 -1368,7 +1336,7 @@@@@@@@@ paranoidzeroentry machine_check *machin
        
                /* ebx: no swapgs flag */
        ENTRY(paranoid_exit)
 - -----        INTR_FRAME
 + +++++        DEFAULT_FRAME
                DISABLE_INTERRUPTS(CLBR_NONE)
                TRACE_IRQS_OFF
                testl %ebx,%ebx                         /* swapgs needed? */
@@@@@@@@@ -1425,6 -1445,7 -1413,6 -1445,7 -1445,7 -1445,7 -1445,7 -1445,7 +1413,6 @@@@@@@@@ error_swapgs
        error_sti:
                TRACE_IRQS_OFF
                ret
 - -----        CFI_ENDPROC
        
        /*
         * There are two places in the kernel that can potentially fault with
                /* Fix truncated RIP */
                movq %rcx,RIP+8(%rsp)
                jmp error_swapgs
 + +++++        CFI_ENDPROC
        END(error_entry)
        
        
                INTR_FRAME
                PARAVIRT_ADJUST_EXCEPTION_FRAME
                pushq_cfi $-1
 - -----        subq $15*8, %rsp
 - -----        CFI_ADJUST_CFA_OFFSET 15*8
 + +++++        subq $ORIG_RAX-R15, %rsp
 + +++++        CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
                call save_paranoid
                DEFAULT_FRAME 0
                /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
diff --combined arch/x86/kernel/tlb_uv.c
@@@@@@@@@ -1001,10 -1001,10 -1001,10 -1001,10 -1001,10 -1001,10 -1001,10 -1001,10 +1001,10 @@@@@@@@@ static int uv_ptc_seq_show(struct seq_f
        static ssize_t tunables_read(struct file *file, char __user *userbuf,
                                                        size_t count, loff_t *ppos)
        {
------ -        char buf[300];
++++++ +        char *buf;
                int ret;
        
------ -        ret = snprintf(buf, 300, "%s %s %s\n%d %d %d %d %d %d %d %d %d\n",
++++++ +        buf = kasprintf(GFP_KERNEL, "%s %s %s\n%d %d %d %d %d %d %d %d %d\n",
                        "max_bau_concurrent plugged_delay plugsb4reset",
                        "timeoutsb4reset ipi_reset_limit complete_threshold",
                        "congested_response_us congested_reps congested_period",
                        timeoutsb4reset, ipi_reset_limit, complete_threshold,
                        congested_response_us, congested_reps, congested_period);
        
------ -        return simple_read_from_buffer(userbuf, count, ppos, buf, ret);
++++++ +        if (!buf)
++++++ +                return -ENOMEM;
++++++ +
++++++ +        ret = simple_read_from_buffer(userbuf, count, ppos, buf, strlen(buf));
++++++ +        kfree(buf);
++++++ +        return ret;
        }
        
        /*
@@@@@@@@@ -1285,7 -1285,6 -1285,6 -1285,6 -1285,6 -1285,6 -1290,6 -1285,6 +1290,7 @@@@@@@@@ static const struct file_operations tun
                .open           = tunables_open,
                .read           = tunables_read,
                .write          = tunables_write,
 +++++++        .llseek         = default_llseek,
        };
        
        static int __init uv_ptc_init(void)
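
The tunables_read() change above replaces a fixed 300-byte stack buffer with a kasprintf()-sized allocation, so the formatted text can never be truncated and allocation failure is reported as -ENOMEM. The same pattern in portable userspace C, with asprintf() standing in for kasprintf() and free() for kfree() (a sketch, not the kernel code):

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
            char *buf;

            /* Let the formatter size the buffer, as kasprintf() does. */
            if (asprintf(&buf, "%s\n%d %d %d\n", "three tunables", 1, 2, 3) < 0)
                    return 1;   /* the kernel version returns -ENOMEM here */

            fwrite(buf, 1, strlen(buf), stdout);
            free(buf);          /* kfree() in the kernel version */
            return 0;
    }
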
diff --combined arch/x86/kernel/traps.c
@@@@@@@@@ -575,6 -575,6 -575,6 -575,6 -575,6 -575,6 -575,6 -575,7 +575,7 @@@@@@@@@ dotraplinkage void __kprobes do_debug(s
                if (regs->flags & X86_VM_MASK) {
                        handle_vm86_trap((struct kernel_vm86_regs *) regs,
                                        error_code, 1);
+++++++                 preempt_conditional_cli(regs);
                        return;
                }
        
@@@@@@@@@ -776,10 -776,21 -776,21 -776,21 -776,21 -776,21 -776,21 -777,21 +777,10 @@@@@@@@@ asmlinkage void math_state_restore(void
        }
        EXPORT_SYMBOL_GPL(math_state_restore);
        
 -------#ifndef CONFIG_MATH_EMULATION
 -------void math_emulate(struct math_emu_info *info)
 -------{
 -------        printk(KERN_EMERG
 -------                "math-emulation not enabled and no coprocessor found.\n");
 -------        printk(KERN_EMERG "killing %s.\n", current->comm);
 -------        force_sig(SIGFPE, current);
 -------        schedule();
 -------}
 -------#endif /* CONFIG_MATH_EMULATION */
 -------
        dotraplinkage void __kprobes
        do_device_not_available(struct pt_regs *regs, long error_code)
        {
 -------#ifdef CONFIG_X86_32
 +++++++#ifdef CONFIG_MATH_EMULATION
                if (read_cr0() & X86_CR0_EM) {
                        struct math_emu_info info = { };
        
        
                        info.regs = regs;
                        math_emulate(&info);
 -------        } else {
 -------                math_state_restore(); /* interrupts still off */
 -------                conditional_sti(regs);
 +++++++                return;
                }
 -------#else
 -------        math_state_restore();
 +++++++#endif
 +++++++        math_state_restore(); /* interrupts still off */
 +++++++#ifdef CONFIG_X86_32
 +++++++        conditional_sti(regs);
        #endif
        }
        
@@@@@@@@@ -870,6 -881,18 -881,18 -881,18 -881,18 -881,18 -881,18 -882,18 +871,6 @@@@@@@@@ void __init trap_init(void
        #endif
        
        #ifdef CONFIG_X86_32
 -------        if (cpu_has_fxsr) {
 -------                printk(KERN_INFO "Enabling fast FPU save and restore... ");
 -------                set_in_cr4(X86_CR4_OSFXSR);
 -------                printk("done.\n");
 -------        }
 -------        if (cpu_has_xmm) {
 -------                printk(KERN_INFO
 -------                        "Enabling unmasked SIMD FPU exception support... ");
 -------                set_in_cr4(X86_CR4_OSXMMEXCPT);
 -------                printk("done.\n");
 -------        }
 -------
                set_system_trap_gate(SYSCALL_VECTOR, &system_call);
                set_bit(SYSCALL_VECTOR, used_vectors);
        #endif
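
The do_debug() hunk above fixes an unbalanced preemption count: the handler bumps the count on entry via preempt_conditional_sti(), but the vm86 path returned early without the matching preempt_conditional_cli(), which the patch adds before the return. A runnable sketch of the restored balance, with hypothetical enter_trap()/leave_trap() stand-ins for the conditional helpers (which also toggle interrupts, ignored here):

    #include <assert.h>
    #include <stdio.h>

    static int preempt_count;

    static void enter_trap(void) { preempt_count++; } /* preempt_conditional_sti() */
    static void leave_trap(void) { preempt_count--; } /* preempt_conditional_cli() */

    static void do_debug_sketch(int vm86_mode)
    {
            enter_trap();
            if (vm86_mode) {
                    /* handle_vm86_trap(...) */
                    leave_trap();   /* the fix: without this the count leaks */
                    return;
            }
            /* ... normal #DB handling ... */
            leave_trap();
    }

    int main(void)
    {
            do_debug_sketch(1);
            do_debug_sketch(0);
            assert(preempt_count == 0);     /* balanced on every path */
            printf("preempt count balanced\n");
            return 0;
    }
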
diff --combined include/linux/interrupt.h
        #include <asm/atomic.h>
        #include <asm/ptrace.h>
        #include <asm/system.h>
 +++++++#include <trace/events/irq.h>
        
        /*
         * These correspond to the IORESOURCE_IRQ_* defines in
@@@@@@@@@ -408,15 -407,9 -407,10 -407,10 -407,10 -407,10 -407,10 -407,10 +408,14 @@@@@@@@@ asmlinkage void do_softirq(void)
        asmlinkage void __do_softirq(void);
        extern void open_softirq(int nr, void (*action)(struct softirq_action *));
        extern void softirq_init(void);
 -------#define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
 +++++++static inline void __raise_softirq_irqoff(unsigned int nr)
 +++++++{
 +++++++        trace_softirq_raise((struct softirq_action *)(unsigned long)nr, NULL);
 +++++++        or_softirq_pending(1UL << nr);
 +++++++}
 +++++++
        extern void raise_softirq_irqoff(unsigned int nr);
        extern void raise_softirq(unsigned int nr);
- ------extern void wakeup_softirqd(void);
        
        /* This is the worklist that queues up per-cpu softirq work.
         *
@@@@@@@@@ -647,8 -640,11 -641,11 -641,11 -641,11 -641,11 -641,11 -641,11 +646,8 @@@@@@@@@ static inline void init_irq_proc(void
        struct seq_file;
        int show_interrupts(struct seq_file *p, void *v);
        
 -------struct irq_desc;
 -------
        extern int early_irq_init(void);
        extern int arch_probe_nr_irqs(void);
        extern int arch_early_irq_init(void);
 -------extern int arch_init_chip_data(struct irq_desc *desc, int node);
        
        #endif
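
The interrupt.h change above turns __raise_softirq_irqoff() from a macro into an inline function so the softirq_raise tracepoint fires on every raise; the state itself is still a pending bitmask OR-ed with 1UL << nr. A small sketch of that bitmask arithmetic, using a hypothetical subset of the softirq numbers:

    #include <stdio.h>

    enum { HI_SOFTIRQ = 0, TIMER_SOFTIRQ = 1, NET_RX_SOFTIRQ = 3 }; /* subset */

    static unsigned long pending;

    /* Mirrors or_softirq_pending(1UL << nr) from the diff. */
    static void raise_sketch(unsigned int nr)
    {
            pending |= 1UL << nr;
    }

    int main(void)
    {
            raise_sketch(TIMER_SOFTIRQ);
            raise_sketch(NET_RX_SOFTIRQ);
            printf("pending mask: 0x%lx\n", pending);   /* prints 0xa */
            return 0;
    }
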
diff --combined kernel/softirq.c
@@@@@@@@@ -67,7 -67,7 -67,7 -67,7 -67,7 -67,7 -67,7 -67,7 +67,7 @@@@@@@@@ char *softirq_to_name[NR_SOFTIRQS] = 
         * to the pending events, so lets the scheduler to balance
         * the softirq load for us.
         */
- ------void wakeup_softirqd(void)
+ ++++++static void wakeup_softirqd(void)
        {
                /* Interrupts are disabled: no need to stop preemption */
                struct task_struct *tsk = __get_cpu_var(ksoftirqd);
        }
        
        /*
 +++++++ * preempt_count and SOFTIRQ_OFFSET usage:
 +++++++ * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
 +++++++ *   softirq processing.
 +++++++ * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 +++++++ *   on local_bh_disable or local_bh_enable.
 +++++++ * This lets us distinguish between whether we are currently processing
 +++++++ * softirq and whether we just have bh disabled.
 +++++++ */
 +++++++
 +++++++/*
         * This one is for softirq.c-internal use,
         * where hardirqs are disabled legitimately:
         */
        #ifdef CONFIG_TRACE_IRQFLAGS
 -------static void __local_bh_disable(unsigned long ip)
 +++++++static void __local_bh_disable(unsigned long ip, unsigned int cnt)
        {
                unsigned long flags;
        
                 * We must manually increment preempt_count here and manually
                 * call the trace_preempt_off later.
                 */
 -------        preempt_count() += SOFTIRQ_OFFSET;
 +++++++        preempt_count() += cnt;
                /*
                 * Were softirqs turned off above:
                 */
 -------        if (softirq_count() == SOFTIRQ_OFFSET)
 +++++++        if (softirq_count() == cnt)
                        trace_softirqs_off(ip);
                raw_local_irq_restore(flags);
        
 -------        if (preempt_count() == SOFTIRQ_OFFSET)
 +++++++        if (preempt_count() == cnt)
                        trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
        }
        #else /* !CONFIG_TRACE_IRQFLAGS */
 -------static inline void __local_bh_disable(unsigned long ip)
 +++++++static inline void __local_bh_disable(unsigned long ip, unsigned int cnt)
        {
 -------        add_preempt_count(SOFTIRQ_OFFSET);
 +++++++        add_preempt_count(cnt);
                barrier();
        }
        #endif /* CONFIG_TRACE_IRQFLAGS */
        
        void local_bh_disable(void)
        {
 -------        __local_bh_disable((unsigned long)__builtin_return_address(0));
 +++++++        __local_bh_disable((unsigned long)__builtin_return_address(0),
 +++++++                                SOFTIRQ_DISABLE_OFFSET);
        }
        
        EXPORT_SYMBOL(local_bh_disable);
        
 +++++++static void __local_bh_enable(unsigned int cnt)
 +++++++{
 +++++++        WARN_ON_ONCE(in_irq());
 +++++++        WARN_ON_ONCE(!irqs_disabled());
 +++++++
 +++++++        if (softirq_count() == cnt)
 +++++++                trace_softirqs_on((unsigned long)__builtin_return_address(0));
 +++++++        sub_preempt_count(cnt);
 +++++++}
 +++++++
        /*
         * Special-case - softirqs can safely be enabled in
         * cond_resched_softirq(), or by __do_softirq(),
         */
        void _local_bh_enable(void)
        {
 -------        WARN_ON_ONCE(in_irq());
 -------        WARN_ON_ONCE(!irqs_disabled());
 -------
 -------        if (softirq_count() == SOFTIRQ_OFFSET)
 -------                trace_softirqs_on((unsigned long)__builtin_return_address(0));
 -------        sub_preempt_count(SOFTIRQ_OFFSET);
 +++++++        __local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
        }
        
        EXPORT_SYMBOL(_local_bh_enable);
@@@@@@@@@ -163,13 -147,13 -147,13 -147,13 -147,13 -147,13 -147,13 -147,13 +163,13 @@@@@@@@@ static inline void _local_bh_enable_ip(
                /*
                 * Are softirqs going to be turned on now:
                 */
 -------        if (softirq_count() == SOFTIRQ_OFFSET)
 +++++++        if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
                        trace_softirqs_on(ip);
                /*
                 * Keep preemption disabled until we are done with
                 * softirq processing:
                 */
 -------        sub_preempt_count(SOFTIRQ_OFFSET - 1);
 +++++++        sub_preempt_count(SOFTIRQ_DISABLE_OFFSET - 1);
        
                if (unlikely(!in_interrupt() && local_softirq_pending()))
                        do_softirq();
@@@@@@@@@ -214,8 -198,7 -198,7 -198,7 -198,7 -198,7 -198,7 -198,7 +214,8 @@@@@@@@@ asmlinkage void __do_softirq(void
                pending = local_softirq_pending();
                account_system_vtime(current);
        
 -------        __local_bh_disable((unsigned long)__builtin_return_address(0));
 +++++++        __local_bh_disable((unsigned long)__builtin_return_address(0),
 +++++++                                SOFTIRQ_OFFSET);
                lockdep_softirq_enter();
        
                cpu = smp_processor_id();
@@@@@@@@@ -262,7 -245,7 -245,7 -245,7 -245,7 -245,7 -245,7 -245,7 +262,7 @@@@@@@@@ restart
                lockdep_softirq_exit();
        
                account_system_vtime(current);
 -------        _local_bh_enable();
 +++++++        __local_bh_enable(SOFTIRQ_OFFSET);
        }
        
        #ifndef __ARCH_HAS_DO_SOFTIRQ
@@@@@@@@@ -296,16 -279,10 -279,10 -279,10 -279,10 -279,10 -279,10 -279,10 +296,16 @@@@@@@@@ void irq_enter(void
        
                rcu_irq_enter();
                if (idle_cpu(cpu) && !in_interrupt()) {
 -------                __irq_enter();
 +++++++                /*
 +++++++                 * Prevent raise_softirq from needlessly waking up ksoftirqd
 +++++++                 * here, as softirq will be serviced on return from interrupt.
 +++++++                 */
 +++++++                local_bh_disable();
                        tick_check_idle(cpu);
 -------        } else
 -------                __irq_enter();
 +++++++                _local_bh_enable();
 +++++++        }
 +++++++
 +++++++        __irq_enter();
        }
        
        #ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
@@@@@@@@@ -719,7 -696,6 -696,6 -696,6 -696,6 -696,6 -696,6 -696,6 +719,7 @@@@@@@@@ static int run_ksoftirqd(void * __bind_
        {
                set_current_state(TASK_INTERRUPTIBLE);
        
 +++++++        current->flags |= PF_KSOFTIRQD;
                while (!kthread_should_stop()) {
                        preempt_disable();
                        if (!local_softirq_pending()) {
@@@@@@@@@ -910,14 -886,17 -886,17 -886,17 -886,17 -886,17 -886,17 -886,17 +910,14 @@@@@@@@@ int __init __weak early_irq_init(void
                return 0;
        }
        
 +++++++#ifdef CONFIG_GENERIC_HARDIRQS
        int __init __weak arch_probe_nr_irqs(void)
        {
 -------        return 0;
 +++++++        return NR_IRQS_LEGACY;
        }
        
        int __init __weak arch_early_irq_init(void)
        {
                return 0;
        }
 -------
 -------int __weak arch_init_chip_data(struct irq_desc *desc, int node)
 -------{
 -------        return 0;
 -------}
 +++++++#endif
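
The comment block added to kernel/softirq.c encodes the key invariant: __do_softirq() moves preempt_count by SOFTIRQ_OFFSET, while local_bh_disable() moves it by SOFTIRQ_DISABLE_OFFSET = 2 * SOFTIRQ_OFFSET, so an odd multiple of SOFTIRQ_OFFSET in softirq_count() means a softirq is actually being served rather than merely blocked. A sketch of that arithmetic, assuming the SOFTIRQ_SHIFT/SOFTIRQ_MASK values from hardirq.h of this period:

    #include <stdio.h>

    #define SOFTIRQ_SHIFT           8
    #define SOFTIRQ_OFFSET          (1UL << SOFTIRQ_SHIFT)          /* 0x100 */
    #define SOFTIRQ_DISABLE_OFFSET  (2 * SOFTIRQ_OFFSET)            /* 0x200 */
    #define SOFTIRQ_MASK            (0xffUL << SOFTIRQ_SHIFT)

    static unsigned long preempt_count;

    #define softirq_count() (preempt_count & SOFTIRQ_MASK)

    int main(void)
    {
            preempt_count += SOFTIRQ_DISABLE_OFFSET;    /* local_bh_disable() */
            printf("bh disabled:     softirq_count = 0x%lx\n", softirq_count());

            preempt_count += SOFTIRQ_OFFSET;            /* enter __do_softirq() */
            printf("serving softirq: softirq_count = 0x%lx\n", softirq_count());

            /* Odd multiple of SOFTIRQ_OFFSET => really inside softirq code. */
            printf("in_serving_softirq: %d\n",
                   !!(softirq_count() & SOFTIRQ_OFFSET));

            preempt_count -= SOFTIRQ_OFFSET;            /* leave __do_softirq() */
            preempt_count -= SOFTIRQ_DISABLE_OFFSET;    /* local_bh_enable()   */
            return 0;
    }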