Merge branches 'tracing/profiling', 'tracing/options' and 'tracing/urgent' into traci...
author Ingo Molnar <mingo@elte.hu>
Sun, 23 Nov 2008 08:10:32 +0000 (09:10 +0100)
committer Ingo Molnar <mingo@elte.hu>
Sun, 23 Nov 2008 08:10:32 +0000 (09:10 +0100)
Documentation/kernel-parameters.txt
arch/x86/Kconfig
kernel/trace/trace.c
kernel/trace/trace_stack.c

diff --combined Documentation/kernel-parameters.txt
@@@@@ -198,42 -198,42 -198,59 -198,42 +198,42 @@@@@ and is between 256 and 4096 characters
                        that require a timer override, but don't have
                        HPET
    
  -     acpi.debug_layer=       [HW,ACPI]
  +     acpi_backlight= [HW,ACPI]
  +                     acpi_backlight=vendor
  +                     acpi_backlight=video
  +                     If set to vendor, prefer the vendor-specific driver
  +                     (e.g. thinkpad_acpi, sony_acpi, etc.) instead
  +                     of the ACPI video.ko driver.
  + 
  +     acpi_display_output=    [HW,ACPI]
  +                     acpi_display_output=vendor
  +                     acpi_display_output=video
  +                     See above.
  + 
  +     acpi.debug_layer=       [HW,ACPI,ACPI_DEBUG]
  +     acpi.debug_level=       [HW,ACPI,ACPI_DEBUG]
                        Format: <int>
  -                     Each bit of the <int> indicates an ACPI debug layer,
  -                     1: enable, 0: disable. It is useful for boot time
  -                     debugging. After system has booted up, it can be set
  -                     via /sys/module/acpi/parameters/debug_layer.
  -                     CONFIG_ACPI_DEBUG must be enabled for this to produce any output.
  -                     Available bits (add the numbers together) to enable debug output
  -                     for specific parts of the ACPI subsystem:
  -                     0x01 utilities 0x02 hardware 0x04 events 0x08 tables
  -                     0x10 namespace 0x20 parser 0x40 dispatcher
  -                     0x80 executer 0x100 resources 0x200 acpica debugger
  -                     0x400 os services 0x800 acpica disassembler.
  -                     The number can be in decimal or prefixed with 0x in hex.
  -                     Warning: Many of these options can produce a lot of
  -                     output and make your system unusable. Be very careful.
  - 
  -     acpi.debug_level=       [HW,ACPI]
  -                     Format: <int>
  -                     Each bit of the <int> indicates an ACPI debug level,
  -                     which corresponds to the level in an ACPI_DEBUG_PRINT
  -                     statement.  After system has booted up, this mask
  -                     can be set via /sys/module/acpi/parameters/debug_level.
  - 
  -                     CONFIG_ACPI_DEBUG must be enabled for this to produce
  -                     any output.  The number can be in decimal or prefixed
  -                     with 0x in hex.  Some of these options produce so much
  -                     output that the system is unusable.
  - 
  -                     The following global components are defined by the
  -                     ACPI CA:
  -                            0x01 error
  -                            0x02 warn
  -                            0x04 init
  -                            0x08 debug object
  -                            0x10 info
  -                            0x20 init names
  -                            0x40 parse
  -                            0x80 load
  -                           0x100 dispatch
  -                           0x200 execute
  -                           0x400 names
  -                           0x800 operation region
  -                          0x1000 bfield
  -                          0x2000 tables
  -                          0x4000 values
  -                          0x8000 objects
  -                         0x10000 resources
  -                         0x20000 user requests
  -                         0x40000 package
  -                     The number can be in decimal or prefixed with 0x in hex.
  -                     Warning: Many of these options can produce a lot of
  -                     output and make your system unusable. Be very careful.
  +                     CONFIG_ACPI_DEBUG must be enabled to produce any ACPI
  +                     debug output.  Bits in debug_layer correspond to a
  +                     _COMPONENT in an ACPI source file, e.g.,
  +                         #define _COMPONENT ACPI_PCI_COMPONENT
  +                     Bits in debug_level correspond to a level in
  +                     ACPI_DEBUG_PRINT statements, e.g.,
  +                         ACPI_DEBUG_PRINT((ACPI_DB_INFO, ...
  +                     See Documentation/acpi/debug.txt for more information
  +                     about debug layers and levels.
  + 
  +                     Enable AML "Debug" output, i.e., stores to the Debug
  +                     object while interpreting AML:
  +                         acpi.debug_layer=0xffffffff acpi.debug_level=0x2
  +                     Enable PCI/PCI interrupt routing info messages:
  +                         acpi.debug_layer=0x400000 acpi.debug_level=0x4
  +                     Enable all messages related to ACPI hardware:
  +                         acpi.debug_layer=0x2 acpi.debug_level=0xffffffff
  + 
  +                     Some values produce so much output that the system is
  +                     unusable.  The "log_buf_len" parameter may be useful
  +                     if you need to capture more output.
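A usage note: the pre-patch text of this entry documents the same masks as run-time module parameters, so the PCI routing example above can also be enabled after boot (CONFIG_ACPI_DEBUG still required):

	echo 0x400000 > /sys/module/acpi/parameters/debug_layer
	echo 0x4 > /sys/module/acpi/parameters/debug_level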
    
        acpi.power_nocheck=     [HW,ACPI]
                        Format: 1/0 (enable/disable the power state check)
                        Possible values are:
                        isolate - enable device isolation (each device, as far
                                  as possible, will get its own protection
---                               domain)
+++                               domain) [default]
+++                     share - put every device behind one IOMMU into the
+++                             same protection domain
                        fullflush - enable flushing of IO/TLB entries when
                                    they are unmapped. Otherwise they are
                                    flushed before they are reused, which
    
        digiepca=       [HW,SERIAL]
                        See drivers/char/README.epca and
  -                     Documentation/digiepca.txt.
  +                     Documentation/serial/digiepca.txt.
    
        disable_mtrr_cleanup [X86]
        enable_mtrr_cleanup [X86]
                        See header of drivers/scsi/fdomain.c.
    
        floppy=         [HW]
  -                     See Documentation/floppy.txt.
  +                     See Documentation/blockdev/floppy.txt.
    
        force_pal_cache_flush
                        [IA-64] Avoid check_sal_cache_flush which may hang on
                        parameter will force ia64_sal_cache_flush to call
                        ia64_pal_cache_flush instead of SAL_CACHE_FLUSH.
    
 + +    ftrace=[tracer]
 + +                    [ftrace] will set and start the specified tracer
 + +                    as early as possible in order to facilitate early
 + +                    boot debugging.
 + +
 + +    ftrace_dump_on_oops
 + +                    [ftrace] will dump the trace buffers on oops.
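As an illustration ("function" is one of the standard tracers; any registered tracer name works here), the two parameters combine naturally for early-boot debugging:

	ftrace=function ftrace_dump_on_oops

This starts the function tracer as early as possible and dumps its buffers to the console if the kernel oopses.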
 + +
        gamecon.map[2|3]=
                        [HW,JOY] Multisystem joystick and NES/SNES/PSX pad
                        support via parallel port (up to 5 devices per port)
                        the same attribute, the last one is used.
    
        load_ramdisk=   [RAM] List of ramdisks to load from floppy
  -                     See Documentation/ramdisk.txt.
  +                     See Documentation/blockdev/ramdisk.txt.
    
        lockd.nlm_grace_period=P  [NFS] Assign grace period.
                        Format: <integer>
                        it is equivalent to "nosmp", which also disables
                        the IO APIC.
    
---     max_addr=[KMG]  [KNL,BOOT,ia64] All physical memory greater than or
---                     equal to this physical address is ignored.
+++     max_addr=nn[KMG]        [KNL,BOOT,ia64] All physical memory greater than
+++                     or equal to this physical address is ignored.
    
        max_luns=       [SCSI] Maximum number of LUNs to probe.
                        Should be between 1 and 2^32-1.
    
        mga=            [HW,DRM]
    
+++     min_addr=nn[KMG]        [KNL,BOOT,ia64] All physical memory below this
+++                     physical address is ignored.
+++ 
        mminit_loglevel=
                        [KNL] When CONFIG_DEBUG_MEMORY_INIT is set, this
                        parameter allows control of the logging verbosity for
    
        pcd.            [PARIDE]
                        See header of drivers/block/paride/pcd.c.
  -                     See also Documentation/paride.txt.
  +                     See also Documentation/blockdev/paride.txt.
    
        pci=option[,option...]  [PCI] various PCI subsystem options:
                off             [X86] don't probe for the PCI bus
        pcmv=           [HW,PCMCIA] BadgePAD 4
    
        pd.             [PARIDE]
  -                     See Documentation/paride.txt.
  +                     See Documentation/blockdev/paride.txt.
    
        pdcchassis=     [PARISC,HW] Disable/Enable PDC Chassis Status codes at
                        boot time.
                        See arch/parisc/kernel/pdc_chassis.c
    
        pf.             [PARIDE]
  -                     See Documentation/paride.txt.
  +                     See Documentation/blockdev/paride.txt.
    
        pg.             [PARIDE]
  -                     See Documentation/paride.txt.
  +                     See Documentation/blockdev/paride.txt.
    
        pirq=           [SMP,APIC] Manual mp-table setup
                        See Documentation/x86/i386/IO-APIC.txt.
    
        prompt_ramdisk= [RAM] List of RAM disks to prompt for floppy disk
                        before loading.
  -                     See Documentation/ramdisk.txt.
  +                     See Documentation/blockdev/ramdisk.txt.
    
        psmouse.proto=  [HW,MOUSE] Highest PS2 mouse protocol extension to
                        probe for; one of (bare|imps|exps|lifebook|any).
                        <io>,<mss_io>,<mss_irq>,<mss_dma>,<mpu_io>,<mpu_irq>
    
        pt.             [PARIDE]
  -                     See Documentation/paride.txt.
  +                     See Documentation/blockdev/paride.txt.
    
        pty.legacy_count=
                        [KNL] Number of legacy pty's. Overwrites compiled-in
                        See Documentation/md.txt.
    
        ramdisk_blocksize=      [RAM]
  -                     See Documentation/ramdisk.txt.
  +                     See Documentation/blockdev/ramdisk.txt.
    
        ramdisk_size=   [RAM] Sizes of RAM disks in kilobytes
  -                     See Documentation/ramdisk.txt.
  +                     See Documentation/blockdev/ramdisk.txt.
    
        rcupdate.blimit=        [KNL,BOOT]
                        Set maximum number of finished RCU callbacks to process
                        See Documentation/sonypi.txt
    
        specialix=      [HW,SERIAL] Specialix multi-serial port adapter
  -                     See Documentation/specialix.txt.
  +                     See Documentation/serial/specialix.txt.
    
        spia_io_base=   [HW,MTD]
        spia_fio_base=
diff --combined arch/x86/Kconfig
@@@@@ -29,8 -29,6 -29,8 -29,6 +29,8 @@@@@ config X8
        select HAVE_FTRACE_MCOUNT_RECORD
        select HAVE_DYNAMIC_FTRACE
        select HAVE_FUNCTION_TRACER
 + +    select HAVE_FUNCTION_RET_TRACER if X86_32
 + +    select HAVE_FUNCTION_TRACE_MCOUNT_TEST
        select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64)
        select HAVE_ARCH_KGDB if !X86_VOYAGER
        select HAVE_ARCH_TRACEHOOK
@@@@@ -169,9 -167,9 -169,9 -167,12 +169,12 @@@@@ config GENERIC_PENDING_IR
    config X86_SMP
        bool
        depends on SMP && ((X86_32 && !X86_VOYAGER) || X86_64)
---     select USE_GENERIC_SMP_HELPERS
        default y
    
+++ config USE_GENERIC_SMP_HELPERS
+++     def_bool y
+++     depends on SMP
+++ 
    config X86_32_SMP
        def_bool y
        depends on X86_32 && SMP
@@@@@ -959,7 -957,7 -959,7 -960,7 +962,7 @@@@@ config ARCH_PHYS_ADDR_T_64BI
    config NUMA
        bool "Numa Memory Allocation and Scheduler Support (EXPERIMENTAL)"
        depends on SMP
---     depends on X86_64 || (X86_32 && HIGHMEM64G && (X86_NUMAQ || X86_BIGSMP || X86_SUMMIT && ACPI) && BROKEN)
+++     depends on X86_64 || (X86_32 && HIGHMEM64G && (X86_NUMAQ || X86_BIGSMP || X86_SUMMIT && ACPI) && EXPERIMENTAL)
        default n if X86_PC
        default y if (X86_NUMAQ || X86_SUMMIT || X86_BIGSMP)
        help
diff --combined kernel/trace/trace.c
    unsigned long __read_mostly tracing_max_latency = (cycle_t)ULONG_MAX;
    unsigned long __read_mostly tracing_thresh;
    
++ +/* For tracers that don't implement custom flags */
++ +static struct tracer_opt dummy_tracer_opt[] = {
++ +    { }
++ +};
++ +
++ +static struct tracer_flags dummy_tracer_flags = {
++ +    .val = 0,
++ +    .opts = dummy_tracer_opt
++ +};
++ +
++ +static int dummy_set_flag(u32 old_flags, u32 bit, int set)
++ +{
++ +    return 0;
++ +}
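For contrast, a sketch of what a tracer with real custom flags would define (hypothetical names, not part of this patch); set_tracer_option(), added further down, dispatches trace_options writes to the set_flag() callback and then updates ->flags->val:

	#define TRACE_MYTRACER_VERBOSE	0x1	/* hypothetical option bit */

	static struct tracer_opt mytracer_opts[] = {
		{ .name = "verbose", .bit = TRACE_MYTRACER_VERBOSE },
		{ }	/* NULL-name terminator, as in dummy_tracer_opt */
	};

	static struct tracer_flags mytracer_flags = {
		.val = 0,			/* all options start cleared */
		.opts = mytracer_opts,
	};

	/* return 0 to accept the toggle; non-zero refuses it */
	static int mytracer_set_flag(u32 old_flags, u32 bit, int set)
	{
		return 0;
	}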
 + +
 + +/*
 + + * Kill all tracing for good (never come back).
 + + * It is initialized to 1 and is set to zero once tracer
 + + * initialization succeeds; that is the only place that clears
 + + * it.
 + + */
 + +int tracing_disabled = 1;
 + +
    static DEFINE_PER_CPU(local_t, ftrace_cpu_disabled);
    
    static inline void ftrace_disable_cpu(void)
@@@@@ -71,36 -62,7 -85,36 -62,7 +85,36 @@@@@ static cpumask_t __read_mostly             tracing
    #define for_each_tracing_cpu(cpu)   \
        for_each_cpu_mask(cpu, tracing_buffer_mask)
    
 - -static int tracing_disabled = 1;
 + +/*
 + + * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 + + *
 + + * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 + + * is set, then ftrace_dump is called. This will output the contents
 + + * of the ftrace buffers to the console.  This is very useful for
 + + * capturing traces that lead to crashes and outputting them to a
 + + * serial console.
 + + *
 + + * It is off by default, but you can enable it either by specifying
 + + * "ftrace_dump_on_oops" on the kernel command line or by setting
 + + * /proc/sys/kernel/ftrace_dump_on_oops to true.
 + + */
 + +int ftrace_dump_on_oops;
 + +
 + +static int tracing_set_tracer(char *buf);
 + +
 + +static int __init set_ftrace(char *str)
 + +{
 + +    tracing_set_tracer(str);
 + +    return 1;
 + +}
 + +__setup("ftrace", set_ftrace);
 + +
 + +static int __init set_ftrace_dump_on_oops(char *str)
 + +{
 + +    ftrace_dump_on_oops = 1;
 + +    return 1;
 + +}
 + +__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
    
    long
    ns2usecs(cycle_t nsec)
@@@@@ -150,19 -112,6 -164,19 -112,6 +164,19 @@@@@ static DEFINE_PER_CPU(struct trace_arra
    /* tracer_enabled is used to toggle activation of a tracer */
    static int                  tracer_enabled = 1;
    
 + +/**
 + + * tracing_is_enabled - return tracer_enabled status
 + + *
 + + * This function is used by other tracers to know the status
 + + * of the tracer_enabled flag.  Tracers may use this function
 + + * to decide whether to enable their features when starting
 + + * up. See the irqsoff tracer for an example (start_irqsoff_tracer).
 + + */
 + +int tracing_is_enabled(void)
 + +{
 + +    return tracer_enabled;
 + +}
 + +
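A sketch of the pattern this comment describes (hypothetical tracer; the cited real example is start_irqsoff_tracer):

	static void start_mytracer(struct trace_array *tr)
	{
		/* arm the hooks only if tracing is globally enabled */
		if (tracing_is_enabled())
			register_ftrace_function(&trace_ops);
	}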
    /* function tracing enabled */
    int                         ftrace_function_enabled;
    
@@@@@ -204,9 -153,8 -218,9 -153,8 +218,9 @@@@@ static DEFINE_MUTEX(trace_types_lock)
    /* trace_wait is a waitqueue for tasks blocked on trace_poll */
    static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
    
 - -/* trace_flags holds iter_ctrl options */
 - -unsigned long trace_flags = TRACE_ITER_PRINT_PARENT;
 + +/* trace_flags holds trace_options default values */
 + +unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
 + +    TRACE_ITER_ANNOTATE;
    
    /**
     * trace_wake_up - wake up tasks waiting for trace input
@@@@@ -245,6 -193,13 -259,6 -193,13 +259,6 @@@@@ unsigned long nsecs_to_usecs(unsigned l
        return nsecs / 1000;
    }
    
 - -/*
 - - * TRACE_ITER_SYM_MASK masks the options in trace_flags that
 - - * control the output of kernel symbols.
 - - */
 - -#define TRACE_ITER_SYM_MASK \
 - -    (TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)
 - -
    /* These must match the bit positions in trace_iterator_flags */
    static const char *trace_options[] = {
        "print-parent",
        "stacktrace",
        "sched-tree",
        "ftrace_printk",
 + +    "ftrace_preempt",
 + +    "branch",
 + +    "annotate",
        NULL
    };
    
@@@@@ -518,15 -470,7 -532,7 -470,7 +532,15 @@@@@ int register_tracer(struct tracer *type
                return -1;
        }
    
 +++    /*
 +++     * When this gets called we hold the BKL which means that
 +++     * preemption is disabled. Various trace selftests however
 +++     * need to disable and enable preemption for successful tests.
 +++     * So we drop the BKL here and grab it after the tests again.
 +++     */
 +++    unlock_kernel();
        mutex_lock(&trace_types_lock);
 +++
        for (t = trace_types; t; t = t->next) {
                if (strcmp(type->name, t->name) == 0) {
                        /* already found */
                }
        }
    
++ +    if (!type->set_flag)
++ +            type->set_flag = &dummy_set_flag;
++ +    if (!type->flags)
++ +            type->flags = &dummy_tracer_flags;
++ +    else
++ +            if (!type->flags->opts)
++ +                    type->flags->opts = dummy_tracer_opt;
++ +
    #ifdef CONFIG_FTRACE_STARTUP_TEST
        if (type->selftest) {
                struct tracer *saved_tracer = current_trace;
                struct trace_array *tr = &global_trace;
 - -            int saved_ctrl = tr->ctrl;
                int i;
                /*
                 * Run a selftest on this tracer.
                 * internal tracing to verify that everything is in order.
                 * If we fail, we do not register this tracer.
                 */
 ---            for_each_tracing_cpu(i) {
 +++            for_each_tracing_cpu(i)
                        tracing_reset(tr, i);
 ---            }
 +++
                current_trace = type;
 - -            tr->ctrl = 0;
                /* the test is responsible for initializing and enabling */
                pr_info("Testing tracer %s: ", type->name);
                ret = type->selftest(type, tr);
                /* the test is responsible for resetting too */
                current_trace = saved_tracer;
 - -            tr->ctrl = saved_ctrl;
                if (ret) {
                        printk(KERN_CONT "FAILED!\n");
                        goto out;
                }
                /* Only reset on passing, to avoid touching corrupted buffers */
 ---            for_each_tracing_cpu(i) {
 +++            for_each_tracing_cpu(i)
                        tracing_reset(tr, i);
 ---            }
 +++
                printk(KERN_CONT "PASSED\n");
        }
    #endif
    
     out:
        mutex_unlock(&trace_types_lock);
 +++    lock_kernel();
    
        return ret;
    }
@@@@@ -635,76 -581,6 -648,76 -581,6 +657,76 @@@@@ static void trace_init_cmdlines(void
        cmdline_idx = 0;
    }
    
 + +static int trace_stop_count;
 + +static DEFINE_SPINLOCK(tracing_start_lock);
 + +
 + +/**
 + + * tracing_start - quick start of the tracer
 + + *
 + + * If tracing is enabled but was stopped by tracing_stop,
 + + * this will start the tracer back up.
 + + */
 + +void tracing_start(void)
 + +{
 + +    struct ring_buffer *buffer;
 + +    unsigned long flags;
 + +
 + +    if (tracing_disabled)
 + +            return;
 + +
 + +    spin_lock_irqsave(&tracing_start_lock, flags);
 + +    if (--trace_stop_count)
 + +            goto out;
 + +
 + +    if (trace_stop_count < 0) {
 + +            /* Someone screwed up their debugging */
 + +            WARN_ON_ONCE(1);
 + +            trace_stop_count = 0;
 + +            goto out;
 + +    }
 + +
 + +    buffer = global_trace.buffer;
 + +    if (buffer)
 + +            ring_buffer_record_enable(buffer);
 + +
 + +    buffer = max_tr.buffer;
 + +    if (buffer)
 + +            ring_buffer_record_enable(buffer);
 + +
 + +    ftrace_start();
 + + out:
 + +    spin_unlock_irqrestore(&tracing_start_lock, flags);
 + +}
 + +
 + +/**
 + + * tracing_stop - quick stop of the tracer
 + + *
 + + * Lightweight way to stop tracing. Use in conjunction with
 + + * tracing_start.
 + + */
 + +void tracing_stop(void)
 + +{
 + +    struct ring_buffer *buffer;
 + +    unsigned long flags;
 + +
 + +    ftrace_stop();
 + +    spin_lock_irqsave(&tracing_start_lock, flags);
 + +    if (trace_stop_count++)
 + +            goto out;
 + +
 + +    buffer = global_trace.buffer;
 + +    if (buffer)
 + +            ring_buffer_record_disable(buffer);
 + +
 + +    buffer = max_tr.buffer;
 + +    if (buffer)
 + +            ring_buffer_record_disable(buffer);
 + +
 + + out:
 + +    spin_unlock_irqrestore(&tracing_start_lock, flags);
 + +}
 + +
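The intended pairing, used later in this patch by __tracing_open()/tracing_release() and tracing_entries_write() (a sketch; the stop count nests, so overlapping users are safe):

	tracing_stop();		/* first stop disables both ring buffers */
	/* ... dump or resize buffers with no new entries racing in ... */
	tracing_start();	/* matching start re-enables recording */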
    void trace_stop_cmdline_recording(void);
    
    static void trace_save_cmdline(struct task_struct *tsk)
@@@@@ -815,35 -691,6 -828,36 -691,6 +837,36 @@@@@ trace_function(struct trace_array *tr, 
        ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
    }
    
 + +#ifdef CONFIG_FUNCTION_RET_TRACER
 + +static void __trace_function_return(struct trace_array *tr,
 + +                            struct trace_array_cpu *data,
 + +                            struct ftrace_retfunc *trace,
 + +                            unsigned long flags,
 + +                            int pc)
 + +{
 + +    struct ring_buffer_event *event;
 + +    struct ftrace_ret_entry *entry;
 + +    unsigned long irq_flags;
 + +
 + +    if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
 + +            return;
 + +
 + +    event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry),
 + +                                     &irq_flags);
 + +    if (!event)
 + +            return;
 + +    entry   = ring_buffer_event_data(event);
 + +    tracing_generic_entry_update(&entry->ent, flags, pc);
 + +    entry->ent.type         = TRACE_FN_RET;
 + +    entry->ip               = trace->func;
 + +    entry->parent_ip        = trace->ret;
 + +    entry->rettime          = trace->rettime;
 + +    entry->calltime         = trace->calltime;
++ +    entry->overrun          = trace->overrun;
 + +    ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags);
 + +}
 + +#endif
 + +
    void
    ftrace(struct trace_array *tr, struct trace_array_cpu *data,
           unsigned long ip, unsigned long parent_ip, unsigned long flags,
@@@@@ -994,28 -841,26 -1008,28 -841,26 +1017,28 @@@@@ ftrace_special(unsigned long arg1, unsi
    {
        struct trace_array *tr = &global_trace;
        struct trace_array_cpu *data;
 + +    unsigned long flags;
        int cpu;
        int pc;
    
 - -    if (tracing_disabled || !tr->ctrl)
 + +    if (tracing_disabled)
                return;
    
        pc = preempt_count();
 - -    preempt_disable_notrace();
 + +    local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
    
 - -    if (likely(!atomic_read(&data->disabled)))
 + +    if (likely(atomic_inc_return(&data->disabled) == 1))
                ftrace_trace_special(tr, data, arg1, arg2, arg3, pc);
    
 - -    preempt_enable_notrace();
 + +    atomic_dec(&data->disabled);
 + +    local_irq_restore(flags);
    }
    
    #ifdef CONFIG_FUNCTION_TRACER
    static void
 - -function_trace_call(unsigned long ip, unsigned long parent_ip)
 + +function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
    {
        struct trace_array *tr = &global_trace;
        struct trace_array_cpu *data;
                return;
    
        pc = preempt_count();
 - -    resched = need_resched();
 - -    preempt_disable_notrace();
 + +    resched = ftrace_preempt_disable();
        local_save_flags(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
                trace_function(tr, data, ip, parent_ip, flags, pc);
    
        atomic_dec(&data->disabled);
 - -    if (resched)
 - -            preempt_enable_no_resched_notrace();
 - -    else
 - -            preempt_enable_notrace();
 + +    ftrace_preempt_enable(resched);
 + +}
 + +
 + +static void
 + +function_trace_call(unsigned long ip, unsigned long parent_ip)
 + +{
 + +    struct trace_array *tr = &global_trace;
 + +    struct trace_array_cpu *data;
 + +    unsigned long flags;
 + +    long disabled;
 + +    int cpu;
 + +    int pc;
 + +
 + +    if (unlikely(!ftrace_function_enabled))
 + +            return;
 + +
 + +    /*
 + +     * Need to use raw, since this must be called before the
 + +     * recursive protection is performed.
 + +     */
 + +    local_irq_save(flags);
 + +    cpu = raw_smp_processor_id();
 + +    data = tr->data[cpu];
 + +    disabled = atomic_inc_return(&data->disabled);
 + +
 + +    if (likely(disabled == 1)) {
 + +            pc = preempt_count();
 + +            trace_function(tr, data, ip, parent_ip, flags, pc);
 + +    }
 + +
 + +    atomic_dec(&data->disabled);
 + +    local_irq_restore(flags);
    }
    
 + +#ifdef CONFIG_FUNCTION_RET_TRACER
 + +void trace_function_return(struct ftrace_retfunc *trace)
 + +{
 + +    struct trace_array *tr = &global_trace;
 + +    struct trace_array_cpu *data;
 + +    unsigned long flags;
 + +    long disabled;
 + +    int cpu;
 + +    int pc;
 + +
 + +    raw_local_irq_save(flags);
 + +    cpu = raw_smp_processor_id();
 + +    data = tr->data[cpu];
 + +    disabled = atomic_inc_return(&data->disabled);
 + +    if (likely(disabled == 1)) {
 + +            pc = preempt_count();
 + +            __trace_function_return(tr, data, trace, flags, pc);
 + +    }
 + +    atomic_dec(&data->disabled);
 + +    raw_local_irq_restore(flags);
 + +}
 + +#endif /* CONFIG_FUNCTION_RET_TRACER */
 + +
    static struct ftrace_ops trace_ops __read_mostly =
    {
        .func = function_trace_call,
    void tracing_start_function_trace(void)
    {
        ftrace_function_enabled = 0;
 + +
 + +    if (trace_flags & TRACE_ITER_PREEMPTONLY)
 + +            trace_ops.func = function_trace_call_preempt_only;
 + +    else
 + +            trace_ops.func = function_trace_call;
 + +
        register_ftrace_function(&trace_ops);
 - -    if (tracer_enabled)
 - -            ftrace_function_enabled = 1;
 + +    ftrace_function_enabled = 1;
    }
    
    void tracing_stop_function_trace(void)
    
    enum trace_file_type {
        TRACE_FILE_LAT_FMT      = 1,
 + +    TRACE_FILE_ANNOTATE     = 2,
    };
    
    static void trace_iterator_increment(struct trace_iterator *iter, int cpu)
@@@@@ -1258,6 -1047,10 -1272,6 -1047,10 +1281,6 @@@@@ static void *s_start(struct seq_file *m
    
        atomic_inc(&trace_record_cmdline_disabled);
    
 - -    /* let the tracer grab locks here if needed */
 - -    if (current_trace->start)
 - -            current_trace->start(iter);
 - -
        if (*pos != iter->pos) {
                iter->ent = NULL;
                iter->cpu = 0;
    
    static void s_stop(struct seq_file *m, void *p)
    {
 - -    struct trace_iterator *iter = m->private;
 - -
        atomic_dec(&trace_record_cmdline_disabled);
 - -
 - -    /* let the tracer release locks here if needed */
 - -    if (current_trace && current_trace == iter->trace && iter->trace->stop)
 - -            iter->trace->stop(iter);
 - -
        mutex_unlock(&trace_types_lock);
    }
    
@@@@@ -1343,7 -1143,7 -1357,7 -1143,7 +1366,7 @@@@@ seq_print_sym_offset(struct trace_seq *
    # define IP_FMT "%016lx"
    #endif
    
 - -static int
 + +int
    seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
    {
        int ret;
@@@@@ -1538,23 -1338,6 -1552,23 -1338,6 +1561,23 @@@@@ void trace_seq_print_cont(struct trace_
                trace_seq_putc(s, '\n');
    }
    
 + +static void test_cpu_buff_start(struct trace_iterator *iter)
 + +{
 + +    struct trace_seq *s = &iter->seq;
 + +
 + +    if (!(trace_flags & TRACE_ITER_ANNOTATE))
 + +            return;
 + +
 + +    if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
 + +            return;
 + +
 + +    if (cpu_isset(iter->cpu, iter->started))
 + +            return;
 + +
 + +    cpu_set(iter->cpu, iter->started);
 + +    trace_seq_printf(s, "##### CPU %u buffer started ####\n", iter->cpu);
 + +}
 + +
    static enum print_line_t
    print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
    {
        if (entry->type == TRACE_CONT)
                return TRACE_TYPE_HANDLED;
    
 + +    test_cpu_buff_start(iter);
 + +
        next_entry = find_next_entry(iter, NULL, &next_ts);
        if (!next_entry)
                next_ts = iter->ts;
                        trace_seq_print_cont(s, iter);
                break;
        }
 + +    case TRACE_BRANCH: {
 + +            struct trace_branch *field;
 + +
 + +            trace_assign_type(field, entry);
 + +
 + +            trace_seq_printf(s, "[%s] %s:%s:%d\n",
 + +                             field->correct ? "  ok  " : " MISS ",
 + +                             field->func,
 + +                             field->file,
 + +                             field->line);
 + +            break;
 + +    }
        default:
                trace_seq_printf(s, "Unknown type %d\n", entry->type);
        }
@@@@@ -1703,8 -1472,6 -1717,8 -1472,6 +1726,8 @@@@@ static enum print_line_t print_trace_fm
        if (entry->type == TRACE_CONT)
                return TRACE_TYPE_HANDLED;
    
 + +    test_cpu_buff_start(iter);
 + +
        comm = trace_find_cmdline(iter->ent->pid);
    
        t = ns2usecs(iter->ts);
                        trace_seq_print_cont(s, iter);
                break;
        }
 + +    case TRACE_FN_RET: {
 + +            return print_return_function(iter);
 + +    }
 + +    case TRACE_BRANCH: {
 + +            struct trace_branch *field;
 + +
 + +            trace_assign_type(field, entry);
 + +
 + +            trace_seq_printf(s, "[%s] %s:%s:%d\n",
 + +                             field->correct ? "  ok  " : " MISS ",
 + +                             field->func,
 + +                             field->file,
 + +                             field->line);
 + +            break;
 + +    }
        }
        return TRACE_TYPE_HANDLED;
    }
@@@@@ -2148,11 -1899,6 -2162,11 -1899,6 +2171,11 @@@@@ __tracing_open(struct inode *inode, str
        iter->trace = current_trace;
        iter->pos = -1;
    
 + +    /* Annotate start of buffers if we had overruns */
 + +    if (ring_buffer_overruns(iter->tr->buffer))
 + +            iter->iter_flags |= TRACE_FILE_ANNOTATE;
 + +
        for_each_tracing_cpu(cpu) {
    
                iter->buffer_iter[cpu] =
        m->private = iter;
    
        /* stop the trace while dumping */
 - -    if (iter->tr->ctrl) {
 - -            tracer_enabled = 0;
 - -            ftrace_function_enabled = 0;
 - -    }
 + +    tracing_stop();
    
        if (iter->trace && iter->trace->open)
                        iter->trace->open(iter);
                        ring_buffer_read_finish(iter->buffer_iter[cpu]);
        }
        mutex_unlock(&trace_types_lock);
 ++     kfree(iter);
    
        return ERR_PTR(-ENOMEM);
    }
@@@@@ -2217,7 -1965,14 -2230,7 -1966,14 +2240,7 @@@@@ int tracing_release(struct inode *inode
                iter->trace->close(iter);
    
        /* reenable tracing if it was previously enabled */
 - -    if (iter->tr->ctrl) {
 - -            tracer_enabled = 1;
 - -            /*
 - -             * It is safe to enable function tracing even if it
 - -             * isn't used
 - -             */
 - -            ftrace_function_enabled = 1;
 - -    }
 + +    tracing_start();
        mutex_unlock(&trace_types_lock);
    
        seq_release(inode, file);
@@@@@ -2433,13 -2188,13 -2446,16 -2189,13 +2456,16 @@@@@ static struct file_operations tracing_c
    };
    
    static ssize_t
 - -tracing_iter_ctrl_read(struct file *filp, char __user *ubuf,
 + +tracing_trace_options_read(struct file *filp, char __user *ubuf,
                       size_t cnt, loff_t *ppos)
    {
++ +    int i;
        char *buf;
        int r = 0;
        int len = 0;
-- -    int i;
++ +    u32 tracer_flags = current_trace->flags->val;
++ +    struct tracer_opt *trace_opts = current_trace->flags->opts;
    
        /* calculate max size */
        for (i = 0; trace_options[i]; i++) {
                len += 3; /* "no" and space */
        }
    
++ +    /*
++ +     * Increase the size to account for the names of options
++ +     * specific to the current tracer.
++ +     */
++ +    for (i = 0; trace_opts[i].name; i++) {
++ +            len += strlen(trace_opts[i].name);
++ +            len += 3; /* "no" and space */
++ +    }
++ +
        /* +2 for \n and \0 */
        buf = kmalloc(len + 2, GFP_KERNEL);
        if (!buf)
                        r += sprintf(buf + r, "no%s ", trace_options[i]);
        }
    
++ +    for (i = 0; trace_opts[i].name; i++) {
++ +            if (tracer_flags & trace_opts[i].bit)
++ +                    r += sprintf(buf + r, "%s ",
++ +                            trace_opts[i].name);
++ +            else
++ +                    r += sprintf(buf + r, "no%s ",
++ +                            trace_opts[i].name);
++ +    }
++ +
        r += sprintf(buf + r, "\n");
        WARN_ON(r >= len + 2);
    
        return r;
    }
    
++ +/* Try to set a tracer-specific option */
++ +static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
++ +{
++ +    struct tracer_flags *trace_flags = trace->flags;
++ +    struct tracer_opt *opts = NULL;
++ +    int ret = 0, i = 0;
++ +    int len;
++ +
++ +    for (i = 0; trace_flags->opts[i].name; i++) {
++ +            opts = &trace_flags->opts[i];
++ +            len = strlen(opts->name);
++ +
++ +            if (strncmp(cmp, opts->name, len) == 0) {
++ +                    ret = trace->set_flag(trace_flags->val,
++ +                            opts->bit, !neg);
++ +                    break;
++ +            }
++ +    }
++ +    /* Not found */
++ +    if (!trace_flags->opts[i].name)
++ +            return -EINVAL;
++ +
++ +    /* Refused to handle */
++ +    if (ret)
++ +            return ret;
++ +
++ +    if (neg)
++ +            trace_flags->val &= ~opts->bit;
++ +    else
++ +            trace_flags->val |= opts->bit;
++ +
++ +    return 0;
++ +}
++ +
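With the hypothetical "verbose" option sketched near dummy_tracer_opt, toggling a tracer-specific flag then looks exactly like toggling a global option:

	# echo verbose > /debug/tracing/trace_options
	# echo noverbose > /debug/tracing/trace_options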
    static ssize_t
 - -tracing_iter_ctrl_write(struct file *filp, const char __user *ubuf,
 + +tracing_trace_options_write(struct file *filp, const char __user *ubuf,
                        size_t cnt, loff_t *ppos)
    {
        char buf[64];
        char *cmp = buf;
        int neg = 0;
++ +    int ret;
        int i;
    
        if (cnt >= sizeof(buf))
                        break;
                }
        }
-- -    /*
-- -     * If no option could be set, return an error:
-- -     */
-- -    if (!trace_options[i])
-- -            return -EINVAL;
++ +
++ +    /* If no option could be set, test the specific tracer options */
++ +    if (!trace_options[i]) {
++ +            ret = set_tracer_option(current_trace, cmp, neg);
++ +            if (ret)
++ +                    return ret;
++ +    }
    
        filp->f_pos += cnt;
    
    
    static struct file_operations tracing_iter_fops = {
        .open           = tracing_open_generic,
 - -    .read           = tracing_iter_ctrl_read,
 - -    .write          = tracing_iter_ctrl_write,
 + +    .read           = tracing_trace_options_read,
 + +    .write          = tracing_trace_options_write,
    };
    
    static const char readme_msg[] =
        "# echo sched_switch > /debug/tracing/current_tracer\n"
        "# cat /debug/tracing/current_tracer\n"
        "sched_switch\n"
 - -    "# cat /debug/tracing/iter_ctrl\n"
 + +    "# cat /debug/tracing/trace_options\n"
        "noprint-parent nosym-offset nosym-addr noverbose\n"
 - -    "# echo print-parent > /debug/tracing/iter_ctrl\n"
 + +    "# echo print-parent > /debug/tracing/trace_options\n"
        "# echo 1 > /debug/tracing/tracing_enabled\n"
        "# cat /debug/tracing/trace > /tmp/trace.txt\n"
        "echo 0 > /debug/tracing/tracing_enabled\n"
@@@@@ -2555,10 -2310,11 -2626,10 -2311,11 +2636,10 @@@@@ static ssize_
    tracing_ctrl_read(struct file *filp, char __user *ubuf,
                  size_t cnt, loff_t *ppos)
    {
 - -    struct trace_array *tr = filp->private_data;
        char buf[64];
        int r;
    
 - -    r = sprintf(buf, "%ld\n", tr->ctrl);
 + +    r = sprintf(buf, "%u\n", tracer_enabled);
        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
    }
    
@@@@@ -2586,18 -2342,16 -2657,18 -2343,16 +2667,18 @@@@@ tracing_ctrl_write(struct file *filp, c
        val = !!val;
    
        mutex_lock(&trace_types_lock);
 - -    if (tr->ctrl ^ val) {
 - -            if (val)
 + +    if (tracer_enabled ^ val) {
 + +            if (val) {
                        tracer_enabled = 1;
 - -            else
 + +                    if (current_trace->start)
 + +                            current_trace->start(tr);
 + +                    tracing_start();
 + +            } else {
                        tracer_enabled = 0;
 - -
 - -            tr->ctrl = val;
 - -
 - -            if (current_trace && current_trace->ctrl_update)
 - -                    current_trace->ctrl_update(tr);
 + +                    tracing_stop();
 + +                    if (current_trace->stop)
 + +                            current_trace->stop(tr);
 + +            }
        }
        mutex_unlock(&trace_types_lock);
    
@@@@@ -2623,11 -2377,29 -2694,11 -2378,29 +2704,11 @@@@@ tracing_set_trace_read(struct file *fil
        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
    }
    
 - -static ssize_t
 - -tracing_set_trace_write(struct file *filp, const char __user *ubuf,
 - -                    size_t cnt, loff_t *ppos)
 + +static int tracing_set_tracer(char *buf)
    {
        struct trace_array *tr = &global_trace;
        struct tracer *t;
 - -    char buf[max_tracer_type_len+1];
 - -    int i;
 - -    size_t ret;
 - -
 - -    ret = cnt;
 - -
 - -    if (cnt > max_tracer_type_len)
 - -            cnt = max_tracer_type_len;
 - -
 - -    if (copy_from_user(&buf, ubuf, cnt))
 - -            return -EFAULT;
 - -
 - -    buf[cnt] = 0;
 - -
 - -    /* strip ending whitespace. */
 - -    for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
 - -            buf[i] = 0;
 + +    int ret = 0;
    
        mutex_lock(&trace_types_lock);
        for (t = trace_types; t; t = t->next) {
        if (t == current_trace)
                goto out;
    
 + +    trace_branch_disable();
        if (current_trace && current_trace->reset)
                current_trace->reset(tr);
    
        current_trace = t;
 - -    if (t->init)
 - -            t->init(tr);
 + +    if (t->init) {
 + +            ret = t->init(tr);
 + +            if (ret)
 + +                    goto out;
 + +    }
    
 + +    trace_branch_enable(tr);
     out:
        mutex_unlock(&trace_types_lock);
    
 - -    if (ret > 0)
 - -            filp->f_pos += ret;
 + +    return ret;
 + +}
 + +
 + +static ssize_t
 + +tracing_set_trace_write(struct file *filp, const char __user *ubuf,
 + +                    size_t cnt, loff_t *ppos)
 + +{
 + +    char buf[max_tracer_type_len+1];
 + +    int i;
 + +    size_t ret;
 + +    int err;
 + +
 + +    ret = cnt;
 + +
 + +    if (cnt > max_tracer_type_len)
 + +            cnt = max_tracer_type_len;
 + +
 + +    if (copy_from_user(&buf, ubuf, cnt))
 + +            return -EFAULT;
 + +
 + +    buf[cnt] = 0;
 + +
 + +    /* strip ending whitespace. */
 + +    for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
 + +            buf[i] = 0;
 + +
 + +    err = tracing_set_tracer(buf);
 + +    if (err)
 + +            return err;
 + +
 + +    filp->f_pos += ret;
    
        return ret;
    }
@@@@@ -2753,10 -2491,6 -2824,10 -2492,6 +2834,10 @@@@@ static int tracing_open_pipe(struct ino
                return -ENOMEM;
    
        mutex_lock(&trace_types_lock);
 + +
 + +    /* trace pipe does not show start of buffer */
 + +    cpus_setall(iter->started);
 + +
        iter->tr = &global_trace;
        iter->trace = current_trace;
        filp->private_data = iter;
@@@@@ -2932,7 -2666,7 -3003,7 -2667,7 +3013,7 @@@@@ tracing_entries_read(struct file *filp
        char buf[64];
        int r;
    
 - -    r = sprintf(buf, "%lu\n", tr->entries);
 + +    r = sprintf(buf, "%lu\n", tr->entries >> 10);
        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
    }
    
@@@@@ -2943,6 -2677,7 -3014,6 -2678,7 +3024,6 @@@@@ tracing_entries_write(struct file *filp
        unsigned long val;
        char buf[64];
        int ret, cpu;
 - -    struct trace_array *tr = filp->private_data;
    
        if (cnt >= sizeof(buf))
                return -EINVAL;
    
        mutex_lock(&trace_types_lock);
    
 - -    if (tr->ctrl) {
 - -            cnt = -EBUSY;
 - -            pr_info("ftrace: please disable tracing"
 - -                    " before modifying buffer size\n");
 - -            goto out;
 - -    }
 + +    tracing_stop();
    
        /* disable all cpu buffers */
        for_each_tracing_cpu(cpu) {
                        atomic_inc(&max_tr.data[cpu]->disabled);
        }
    
 + +    /* value is in KB */
 + +    val <<= 10;
 + +
        if (val != global_trace.entries) {
                ret = ring_buffer_resize(global_trace.buffer, val);
                if (ret < 0) {
                        atomic_dec(&max_tr.data[cpu]->disabled);
        }
    
 + +    tracing_start();
        max_tr.entries = global_trace.entries;
        mutex_unlock(&trace_types_lock);
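Since writes are now interpreted in KB (the val <<= 10 above) and reads are shifted down to match, a resize session looks like this (illustrative value):

	# echo 1024 > /debug/tracing/buffer_size_kb
	# cat /debug/tracing/buffer_size_kb
	1024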
    
@@@@@ -3036,8 -2772,9 -3107,8 -2773,9 +3117,8 @@@@@ tracing_mark_write(struct file *filp, c
    {
        char *buf;
        char *end;
 - -    struct trace_array *tr = &global_trace;
    
 - -    if (!tr->ctrl || tracing_disabled)
 + +    if (tracing_disabled)
                return -EINVAL;
    
        if (cnt > TRACE_BUF_SIZE)
@@@@@ -3103,38 -2840,22 -3174,38 -2841,22 +3184,38 @@@@@ static struct file_operations tracing_m
    
    #ifdef CONFIG_DYNAMIC_FTRACE
    
 + +int __weak ftrace_arch_read_dyn_info(char *buf, int size)
 + +{
 + +    return 0;
 + +}
 + +
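The __weak stub lets an architecture append its own text to the dyn_ftrace_total_info output; a hypothetical override (format string illustrative) could be:

	/* arch code: write at most 'size' bytes into 'buf', return bytes written */
	int ftrace_arch_read_dyn_info(char *buf, int size)
	{
		return scnprintf(buf, size, "arch-specific dyn-ftrace state");
	}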
    static ssize_t
 - -tracing_read_long(struct file *filp, char __user *ubuf,
 + +tracing_read_dyn_info(struct file *filp, char __user *ubuf,
                  size_t cnt, loff_t *ppos)
    {
 + +    static char ftrace_dyn_info_buffer[1024];
 + +    static DEFINE_MUTEX(dyn_info_mutex);
        unsigned long *p = filp->private_data;
 - -    char buf[64];
 + +    char *buf = ftrace_dyn_info_buffer;
 + +    int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
        int r;
    
 - -    r = sprintf(buf, "%ld\n", *p);
 + +    mutex_lock(&dyn_info_mutex);
 + +    r = sprintf(buf, "%ld ", *p);
    
 - -    return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
 + +    r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
 + +    buf[r++] = '\n';
 + +
 + +    r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
 + +
 + +    mutex_unlock(&dyn_info_mutex);
 + +
 + +    return r;
    }
    
 - -static struct file_operations tracing_read_long_fops = {
 + +static struct file_operations tracing_dyn_info_fops = {
        .open           = tracing_open_generic,
 - -    .read           = tracing_read_long,
 + +    .read           = tracing_read_dyn_info,
    };
    #endif
    
@@@@@ -3175,10 -2896,10 -3246,10 -2897,10 +3256,10 @@@@@ static __init int tracer_init_debugfs(v
        if (!entry)
                pr_warning("Could not create debugfs 'tracing_enabled' entry\n");
    
 - -    entry = debugfs_create_file("iter_ctrl", 0644, d_tracer,
 + +    entry = debugfs_create_file("trace_options", 0644, d_tracer,
                                    NULL, &tracing_iter_fops);
        if (!entry)
 - -            pr_warning("Could not create debugfs 'iter_ctrl' entry\n");
 + +            pr_warning("Could not create debugfs 'trace_options' entry\n");
    
        entry = debugfs_create_file("tracing_cpumask", 0644, d_tracer,
                                    NULL, &tracing_cpumask_fops);
                pr_warning("Could not create debugfs "
                           "'trace_pipe' entry\n");
    
 - -    entry = debugfs_create_file("trace_entries", 0644, d_tracer,
 + +    entry = debugfs_create_file("buffer_size_kb", 0644, d_tracer,
                                    &global_trace, &tracing_entries_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
 - -                       "'trace_entries' entry\n");
 + +                       "'buffer_size_kb' entry\n");
    
        entry = debugfs_create_file("trace_marker", 0220, d_tracer,
                                    NULL, &tracing_mark_fops);
    #ifdef CONFIG_DYNAMIC_FTRACE
        entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer,
                                    &ftrace_update_tot_cnt,
 - -                                &tracing_read_long_fops);
 + +                                &tracing_dyn_info_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'dyn_ftrace_total_info' entry\n");
@@@@@ -3266,7 -2987,7 -3337,7 -2988,7 +3347,7 @@@@@ int trace_vprintk(unsigned long ip, con
        unsigned long flags, irq_flags;
        int cpu, len = 0, size, pc;
    
 - -    if (!tr->ctrl || tracing_disabled)
 + +    if (tracing_disabled)
                return 0;
    
        pc = preempt_count();
@@@@@ -3324,8 -3045,7 -3395,8 -3046,7 +3405,8 @@@@@ EXPORT_SYMBOL_GPL(__ftrace_printk)
    static int trace_panic_handler(struct notifier_block *this,
                               unsigned long event, void *unused)
    {
 - -    ftrace_dump();
 + +    if (ftrace_dump_on_oops)
 + +            ftrace_dump();
        return NOTIFY_OK;
    }
    
@@@@@ -3341,8 -3061,7 -3412,8 -3062,7 +3422,8 @@@@@ static int trace_die_handler(struct not
    {
        switch (val) {
        case DIE_OOPS:
 - -            ftrace_dump();
 + +            if (ftrace_dump_on_oops)
 + +                    ftrace_dump();
                break;
        default:
                break;
@@@@@ -3383,6 -3102,7 -3454,6 -3103,7 +3464,6 @@@@@ trace_printk_seq(struct trace_seq *s
        trace_seq_reset(s);
    }
    
 - -
    void ftrace_dump(void)
    {
        static DEFINE_SPINLOCK(ftrace_dump_lock);
@@@@@ -3500,6 -3220,7 -3571,6 -3221,7 +3581,6 @@@@@ __init static int tracer_alloc_buffers(
    #endif
    
        /* All seems OK, enable tracing */
 - -    global_trace.ctrl = tracer_enabled;
        tracing_disabled = 0;
    
        atomic_notifier_chain_register(&panic_notifier_list,
diff --combined kernel/trace/trace_stack.c
@@@@@ -107,7 -107,8 -107,7 -107,8 +107,7 @@@@@ stack_trace_call(unsigned long ip, unsi
        if (unlikely(!ftrace_enabled || stack_trace_disabled))
                return;
    
 - -    resched = need_resched();
 - -    preempt_disable_notrace();
 + +    resched = ftrace_preempt_disable();
    
        cpu = raw_smp_processor_id();
        /* no atomic needed, we only modify this variable by this cpu */
     out:
        per_cpu(trace_active, cpu)--;
        /* prevent recursion in schedule */
 - -    if (resched)
 - -            preempt_enable_no_resched_notrace();
 - -    else
 - -            preempt_enable_notrace();
 + +    ftrace_preempt_enable(resched);
    }
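ftrace_preempt_disable()/ftrace_preempt_enable() are introduced elsewhere in this series; their behavior is pinned down by the open-coded sequence they replace above, roughly:

	static inline int ftrace_preempt_disable(void)
	{
		int resched = need_resched();

		preempt_disable_notrace();
		return resched;
	}

	static inline void ftrace_preempt_enable(int resched)
	{
		if (resched)
			preempt_enable_no_resched_notrace();
		else
			preempt_enable_notrace();
	}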
    
    static struct ftrace_ops trace_ops __read_mostly =
@@@@@ -180,11 -184,11 -180,11 -184,16 +180,16 @@@@@ static struct file_operations stack_max
    static void *
    t_next(struct seq_file *m, void *v, loff_t *pos)
    {
---     long i = (long)m->private;
+++     long i;
    
        (*pos)++;
    
---     i++;
+++     if (v == SEQ_START_TOKEN)
+++             i = 0;
+++     else {
+++             i = *(long *)v;
+++             i++;
+++     }
    
        if (i >= max_stack_trace.nr_entries ||
            stack_dump_trace[i] == ULONG_MAX)
    
    static void *t_start(struct seq_file *m, loff_t *pos)
    {
---     void *t = &m->private;
+++     void *t = SEQ_START_TOKEN;
        loff_t l = 0;
    
        local_irq_disable();
        __raw_spin_lock(&max_stack_lock);
    
+++     if (*pos == 0)
+++             return SEQ_START_TOKEN;
+++ 
        for (; t && l < *pos; t = t_next(m, t, &l))
                ;
    
@@@@@ -231,10 -235,10 -231,10 -243,10 +239,10 @@@@@ static int trace_lookup_stack(struct se
    
    static int t_show(struct seq_file *m, void *v)
    {
---     long i = *(long *)v;
+++     long i;
        int size;
    
---     if (i < 0) {
+++     if (v == SEQ_START_TOKEN) {
                seq_printf(m, "        Depth   Size      Location"
                           "    (%d entries)\n"
                           "        -----   ----      --------\n",
                return 0;
        }
    
+++     i = *(long *)v;
+++ 
        if (i >= max_stack_trace.nr_entries ||
            stack_dump_trace[i] == ULONG_MAX)
                return 0;
@@@@@ -271,10 -275,10 -271,10 -285,6 +281,6 @@@@@ static int stack_trace_open(struct inod
        int ret;
    
        ret = seq_open(file, &stack_trace_seq_ops);
---     if (!ret) {
---             struct seq_file *m = file->private_data;
---             m->private = (void *)-1;
---     }
    
        return ret;
    }