debug: show more debug info when hardlockup happens
author: jiamin ma <jiamin.ma@amlogic.com>
Thu, 6 Sep 2018 07:32:28 +0000 (15:32 +0800)
committer: Jianxin Pan <jianxin.pan@amlogic.com>
Fri, 14 Sep 2018 11:59:21 +0000 (04:59 -0700)
PD#173193: need more debug info when hardlockup happens

Change-Id: Ia265a58ba776168ac4838cc7f4a6d7cfbeaf6557
Signed-off-by: jiamin ma <jiamin.ma@amlogic.com>
13 files changed:
MAINTAINERS
arch/arm/configs/meson64_a32_defconfig
arch/arm/include/asm/irqflags.h
arch/arm/kernel/process.c
arch/arm64/include/asm/irqflags.h
arch/arm64/kernel/process.c
drivers/amlogic/debug/debug_lockup.c
drivers/amlogic/debug/irqflags_debug_arm.h [new file with mode: 0644]
drivers/amlogic/debug/irqflags_debug_arm64.h [moved from drivers/amlogic/debug/irqflags_debug.h with 95% similarity]
include/linux/amlogic/debug_lockup.h
kernel/irq/chip.c
kernel/irq/handle.c
kernel/softirq.c

index 941872e..b974a98 100644 (file)
@@ -14486,7 +14486,7 @@ F:      drivers/amlogic/defendkey/*
 
 AMLOGIC DEBUG
 M: Jianxin Pan <jianxin.pan@amlogic.com>
-F:  drivers/amlogic/debug/
+F:  drivers/amlogic/debug/*
 
 AMLOGIC G12A spdif channel status
 M: xing wang<xing.wang@amlogic.com>
index 07fac6f..015afb7 100644 (file)
@@ -348,6 +348,8 @@ CONFIG_AMLOGIC_SARADC=y
 CONFIG_AMLOGIC_TEE=y
 CONFIG_AMLOGIC_GPIO_IRQ=y
 CONFIG_AMLOGIC_ATV_DEMOD=y
+CONFIG_AMLOGIC_DEBUG=y
+CONFIG_AMLOGIC_DEBUG_LOCKUP=y
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
index e6b70d9..02f5c70 100644 (file)
@@ -5,6 +5,10 @@
 
 #include <asm/ptrace.h>
 
+#ifdef CONFIG_AMLOGIC_DEBUG_LOCKUP
+#include <../drivers/amlogic/debug/irqflags_debug_arm.h>
+#else
+
 /*
  * CPU interrupt mask handling.
  */
@@ -182,5 +186,6 @@ static inline int arch_irqs_disabled_flags(unsigned long flags)
 
 #include <asm-generic/irqflags.h>
 
+#endif
 #endif /* ifdef __KERNEL__ */
 #endif /* ifndef __ASM_ARM_IRQFLAGS_H */
index 38ad8b9..ffa954c 100644 (file)
 #include <asm/tls.h>
 #include <asm/vdso.h>
 
+#ifdef CONFIG_AMLOGIC_DEBUG_LOCKUP
+#include <linux/amlogic/debug_lockup.h>
+#endif
+
 #ifdef CONFIG_CC_STACKPROTECTOR
 #include <linux/stackprotector.h>
 unsigned long __stack_chk_guard __read_mostly;
@@ -80,6 +84,9 @@ void arch_cpu_idle_prepare(void)
 
 void arch_cpu_idle_enter(void)
 {
+#ifdef CONFIG_AMLOGIC_DEBUG_LOCKUP
+       __arch_cpu_idle_enter();
+#endif
        idle_notifier_call_chain(IDLE_START);
        ledtrig_cpu(CPU_LED_IDLE_START);
 #ifdef CONFIG_PL310_ERRATA_769419
@@ -91,6 +98,9 @@ void arch_cpu_idle_exit(void)
 {
        ledtrig_cpu(CPU_LED_IDLE_END);
        idle_notifier_call_chain(IDLE_END);
+#ifdef CONFIG_AMLOGIC_DEBUG_LOCKUP
+       __arch_cpu_idle_exit();
+#endif
 }
 
 /*
index 81db6c9..03c1838 100644 (file)
@@ -21,7 +21,7 @@
 #include <asm/ptrace.h>
 
 #ifdef CONFIG_AMLOGIC_DEBUG_LOCKUP
-#include <../drivers/amlogic/debug/irqflags_debug.h>
+#include <../drivers/amlogic/debug/irqflags_debug_arm64.h>
 #else
 /*
  * CPU interrupt mask handling.
index 882c808..f458f44 100644 (file)
@@ -92,6 +92,18 @@ void arch_cpu_idle_dead(void)
 }
 #endif
 
+#ifdef CONFIG_AMLOGIC_DEBUG_LOCKUP
+void arch_cpu_idle_enter(void)
+{
+       __arch_cpu_idle_enter();
+}
+
+void arch_cpu_idle_exit(void)
+{
+       __arch_cpu_idle_exit();
+}
+#endif
+
 /*
  * Called by kexec, immediately prior to machine_kexec().
  *
index eec7a24..3c60c92 100644 (file)
 #define IRQ_CNT                        256
 #define CCCNT_WARN             1000
 #define CPU                    8
-static unsigned long t_base[IRQ_CNT];
-static unsigned long t_isr[IRQ_CNT];
-static unsigned long t_total[IRQ_CNT];
+static unsigned long long t_base[IRQ_CNT];
+static unsigned long long t_isr[IRQ_CNT];
+static unsigned long long t_total[IRQ_CNT];
 static unsigned int cnt_total[IRQ_CNT];
 static void *cpu_action[CPU];
 static int cpu_irq[CPU] = {0};
 static void *cpu_sirq[CPU] = {NULL};
 
 /*irq disable trace*/
-#define LONG_IRQDIS            (1000 * 1000000)        /*500 ms*/
+#define LONG_IRQDIS            (1000 * 1000000)/*1000 ms*/
 #define OUT_WIN                        (500 * 1000000) /*500 ms*/
 #define LONG_IDLE              (5000000000)    /*5 sec*/
 #define ENTRY                  10
-static unsigned long           t_i_d[CPU];
+static unsigned long long      t_i_d[CPU];
 static int                     irq_flg;
 static struct stack_trace      irq_trace[CPU];
 static unsigned long           t_entrys[CPU][ENTRY];
-static unsigned long           t_idle[CPU] = { 0 };
-static unsigned long           t_d_out;
+static unsigned long long      t_idle[CPU] = { 0 };
+static unsigned long long      t_d_out;
 
 static unsigned long isr_thr = LONG_ISR;
 core_param(isr_thr, isr_thr, ulong, 0644);
@@ -68,7 +68,7 @@ core_param(isr_check_en, isr_check_en, int, 0644);
 static unsigned long out_thr = OUT_WIN;
 core_param(out_thr, out_thr, ulong, 0644);
 
-void notrace isr_in_hook(unsigned int cpu, unsigned long *tin,
+void notrace isr_in_hook(unsigned int cpu, unsigned long long *tin,
        unsigned int irq, void *act)
 {
        if (irq >= IRQ_CNT || !isr_check_en || oops_in_progress)
@@ -84,10 +84,14 @@ void notrace isr_in_hook(unsigned int cpu, unsigned long *tin,
        }
 }
 
-void notrace isr_out_hook(unsigned int cpu, unsigned long tin, unsigned int irq)
+void notrace isr_out_hook(unsigned int cpu, unsigned long long tin,
+       unsigned int irq)
 {
-       unsigned long tout;
-       unsigned int ratio = 0;
+       unsigned long long tout;
+       unsigned long long ratio = 0;
+       unsigned long long t_diff;
+       unsigned long long t_isr_tmp;
+       unsigned long long t_total_tmp;
 
        if (!isr_check_en || oops_in_progress)
                return;
@@ -99,20 +103,26 @@ void notrace isr_out_hook(unsigned int cpu, unsigned long tin, unsigned int irq)
        t_total[irq] = (tout-t_base[irq]);
        cnt_total[irq]++;
 
-       if (tout > isr_thr + tin)
-               pr_err("ISR_Long___ERR. irq:%d  tout-tin:%ld ms\n",
-                       irq, (tout - tin) / ns2ms);
+       if (tout > isr_thr + tin) {
+               t_diff = tout - tin;
+               do_div(t_diff, ns2ms);
+               pr_err("ISR_Long___ERR. irq:%d  tout-tin:%llu ms\n",
+                       irq, t_diff);
+       }
 
        if (t_total[irq] < CHK_WINDOW)
                return;
 
-       ratio = t_isr[irq] * 100 / t_total[irq];
-       if (ratio >= 35) {
-               pr_err("IRQRatio___ERR.irq:%d ratio:%d\n", irq, (int)ratio);
-               pr_err("t_isr:%d  t_total:%d, cnt:%d\n",
-                       (int)(t_isr[irq] / ns2ms),
-                       (int)(t_total[irq] / ns2ms),
-                       cnt_total[irq]);
+       if (t_isr[irq] * 100 >= 35 * t_total[irq]) {
+               t_isr_tmp = t_isr[irq];
+               do_div(t_isr_tmp, ns2ms);
+               t_total_tmp = t_total[irq];
+               do_div(t_total_tmp, ns2ms);
+               ratio = t_isr_tmp * 100;
+               do_div(ratio, t_total_tmp);
+               pr_err("IRQRatio___ERR.irq:%d ratio:%llu\n", irq, ratio);
+               pr_err("t_isr:%llu  t_total:%llu, cnt:%d\n",
+                       t_isr_tmp, t_total_tmp, cnt_total[irq]);
        }
        t_base[irq] = sched_clock();
        t_isr[irq] = 0;
@@ -121,19 +131,22 @@ void notrace isr_out_hook(unsigned int cpu, unsigned long tin, unsigned int irq)
        cpu_action[cpu] = NULL;
 }
 
-void notrace sirq_in_hook(unsigned int cpu, unsigned long *tin, void *p)
+void notrace sirq_in_hook(unsigned int cpu, unsigned long long *tin, void *p)
 {
        cpu_sirq[cpu] = p;
        *tin = sched_clock();
 }
-void notrace sirq_out_hook(unsigned int cpu, unsigned long tin, void *p)
+void notrace sirq_out_hook(unsigned int cpu, unsigned long long tin, void *p)
 {
-       unsigned long tout = sched_clock();
+       unsigned long long tout = sched_clock();
+       unsigned long long t_diff;
 
        if (cpu_sirq[cpu] && tin && (tout > tin + sirq_thr) &&
                !oops_in_progress) {
-               pr_err("SIRQLong___ERR. sirq:%p  tout-tin:%ld ms\n",
-                       p, (tout - tin) / ns2ms);
+               t_diff = tout - tin;
+               do_div(t_diff, ns2ms);
+               pr_err("SIRQLong___ERR. sirq:%p  tout-tin:%llu ms\n",
+                       p, t_diff);
        }
        cpu_sirq[cpu] = NULL;
 }
@@ -145,6 +158,7 @@ void notrace irq_trace_start(unsigned long flags)
 
        if (!irq_flg  || !irq_check_en || oops_in_progress)
                return;
+
        if (arch_irqs_disabled_flags(flags))
                return;
 
@@ -162,13 +176,14 @@ void notrace irq_trace_start(unsigned long flags)
        irq_trace[cpu].skip = 2;
        irq_trace[cpu].nr_entries = 0;
        t_i_d[cpu] = sched_clock();
+
        save_stack_trace(&irq_trace[cpu]);
 }
 EXPORT_SYMBOL(irq_trace_start);
 
 void notrace irq_trace_stop(unsigned long flag)
 {
-       unsigned long t_i_e, t;
+       unsigned long long t_i_e, t;
        unsigned int cpu;
        int softirq = 0;
        static int out_cnt;
@@ -201,9 +216,11 @@ void notrace irq_trace_stop(unsigned long flag)
                out_cnt++;
                if (t_i_e >= t_d_out + out_thr) {
                        t_d_out = t_i_e;
-                       pr_err("\n\nDisIRQ___ERR:%ld ms <%ld %ld> %d:\n",
-                               t / ns2ms, t_i_e / ns2ms, t_i_d[cpu] / ns2ms,
-                               out_cnt);
+                       do_div(t, ns2ms);
+                       do_div(t_i_e, ns2ms);
+                       do_div(t_i_d[cpu], ns2ms);
+                       pr_err("\n\nDisIRQ___ERR:%llu ms <%llu %llu> %d:\n",
+                               t, t_i_e, t_i_d[cpu], out_cnt);
                        print_stack_trace(&irq_trace[cpu], 0);
                        dump_stack();
                }
@@ -216,7 +233,7 @@ void __attribute__((weak)) lockup_hook(int cpu)
 {
 }
 
-void  notrace arch_cpu_idle_enter(void)
+void  notrace __arch_cpu_idle_enter(void)
 {
        int cpu;
 
@@ -226,7 +243,7 @@ void  notrace arch_cpu_idle_enter(void)
        put_cpu();
        t_idle[cpu] = local_clock();
 }
-void  notrace arch_cpu_idle_exit(void)
+void  notrace __arch_cpu_idle_exit(void)
 {
        int cpu;
 
@@ -242,6 +259,10 @@ void pr_lockup_info(int c)
        int cpu;
        int virq = irq_check_en;
        int visr = isr_check_en;
+       unsigned long long t_idle_diff;
+       unsigned long long t_idle_tmp;
+       unsigned long long t_i_diff;
+       unsigned long long t_i_tmp;
 
        irq_flg = 0;
        irq_check_en = 0;
@@ -250,7 +271,7 @@ void pr_lockup_info(int c)
        pr_err("\n\n\nHARDLOCKUP____ERR.CPU[%d] <irqen:%d isren%d>START\n",
                c, virq, visr);
        for_each_online_cpu(cpu) {
-               unsigned long t_cur = sched_clock();
+               unsigned long long t_cur = sched_clock();
                struct task_struct *p = (cpu_rq(cpu)->curr);
                int preempt = task_thread_info(p)->preempt_count;
 
@@ -264,15 +285,22 @@ void pr_lockup_info(int c)
                        (unsigned int)(preempt & SOFTIRQ_MASK));
 
                if (t_i_d[cpu]) {
-                       pr_err("IRQ____ERR[%d]. <%ld %ld>.\n",
-                               cpu,  t_i_d[cpu] / ns2ms,
-                               (t_cur-t_i_d[cpu]) / ns2ms);
+                       t_i_diff = t_cur-t_i_d[cpu];
+                       do_div(t_i_diff, ns2ms);
+                       t_i_tmp = t_i_d[cpu];
+                       do_div(t_i_tmp, ns2ms);
+                       pr_err("IRQ____ERR[%d]. <%llu %llu>.\n",
+                               cpu, t_i_tmp, t_i_diff);
                        print_stack_trace(&irq_trace[cpu], 0);
                }
                if (t_idle[cpu] && (t_idle[cpu] > LONG_IDLE + t_cur)) {
-                       pr_err("CPU[%d] IdleLong____ERR:%ld ms <%ld %ld>\n",
-                               cpu, (t_cur - t_idle[cpu]) / ns2ms,
-                               t_cur / ns2ms, t_idle[cpu] / ns2ms);
+                       t_idle_diff = t_cur - t_idle[cpu];
+                       do_div(t_idle_diff, ns2ms);
+                       do_div(t_cur, ns2ms);
+                       t_idle_tmp = t_idle[cpu];
+                       do_div(t_idle_tmp, ns2ms);
+                       pr_err("CPU[%d] IdleLong____ERR:%llu ms <%llu %llu>\n",
+                               cpu, t_idle_diff, t_cur, t_idle_tmp);
                }
                dump_cpu_task(cpu);
                lockup_hook(cpu);
diff --git a/drivers/amlogic/debug/irqflags_debug_arm.h b/drivers/amlogic/debug/irqflags_debug_arm.h
new file mode 100644 (file)
index 0000000..3f8f28d
--- /dev/null
@@ -0,0 +1,212 @@
+/*
+ * drivers/amlogic/debug/irqflags_debug_arm.h
+ *
+ * Copyright (C) 2015 Amlogic, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#ifndef __ASM_IRQFLAGS_DEBUG_ARM_H
+#define __ASM_IRQFLAGS_DEBUG_ARM_H
+
+#ifdef __KERNEL__
+
+#include <linux/amlogic/debug_lockup.h>
+
+/*
+ * CPU interrupt mask handling.
+ */
+#ifdef CONFIG_CPU_V7M
+#define IRQMASK_REG_NAME_R "primask"
+#define IRQMASK_REG_NAME_W "primask"
+#define IRQMASK_I_BIT  1
+#else
+#define IRQMASK_REG_NAME_R "cpsr"
+#define IRQMASK_REG_NAME_W "cpsr_c"
+#define IRQMASK_I_BIT  PSR_I_BIT
+#endif
+
+#if __LINUX_ARM_ARCH__ >= 6
+
+#define arch_local_irq_save arch_local_irq_save
+static inline unsigned long arch_local_irq_save(void)
+{
+       unsigned long flags;
+
+       asm volatile(
+               "mrs    %0, " IRQMASK_REG_NAME_R "      @ arch_local_irq_save\n"
+               "cpsid  i"
+               : "=r" (flags) : : "memory", "cc");
+       irq_trace_start(flags);
+       return flags;
+}
+
+#define arch_local_irq_enable arch_local_irq_enable
+static inline void arch_local_irq_enable(void)
+{
+       irq_trace_stop(0);
+       asm volatile(
+               "       cpsie i                 @ arch_local_irq_enable"
+               :
+               :
+               : "memory", "cc");
+}
+
+#define arch_local_irq_disable arch_local_irq_disable
+static inline void arch_local_irq_disable(void)
+{
+#if 0
+       asm volatile(
+               "       cpsid i                 @ arch_local_irq_disable"
+               :
+               :
+               : "memory", "cc");
+#endif
+       arch_local_irq_save();
+}
+
+#define local_fiq_enable()  __asm__("cpsie f   @ __stf" : : : "memory", "cc")
+#define local_fiq_disable() __asm__("cpsid f   @ __clf" : : : "memory", "cc")
+
+#ifndef CONFIG_CPU_V7M
+#define local_abt_enable()  __asm__("cpsie a   @ __sta" : : : "memory", "cc")
+#define local_abt_disable() __asm__("cpsid a   @ __cla" : : : "memory", "cc")
+#else
+#define local_abt_enable()     do { } while (0)
+#define local_abt_disable()    do { } while (0)
+#endif
+#else
+
+/*
+ * Save the current interrupt enable state & disable IRQs
+ */
+#define arch_local_irq_save arch_local_irq_save
+static inline unsigned long arch_local_irq_save(void)
+{
+       unsigned long flags, temp;
+
+       asm volatile(
+               "       mrs     %0, cpsr        @ arch_local_irq_save\n"
+               "       orr     %1, %0, #128\n"
+               "       msr     cpsr_c, %1"
+               : "=r" (flags), "=r" (temp)
+               :
+               : "memory", "cc");
+       return flags;
+}
+
+/*
+ * Enable IRQs
+ */
+#define arch_local_irq_enable arch_local_irq_enable
+static inline void arch_local_irq_enable(void)
+{
+       unsigned long temp;
+
+       asm volatile(
+               "       mrs     %0, cpsr        @ arch_local_irq_enable\n"
+               "       bic     %0, %0, #128\n"
+               "       msr     cpsr_c, %0"
+               : "=r" (temp)
+               :
+               : "memory", "cc");
+}
+
+/*
+ * Disable IRQs
+ */
+#define arch_local_irq_disable arch_local_irq_disable
+static inline void arch_local_irq_disable(void)
+{
+       unsigned long temp;
+
+       asm volatile(
+               "       mrs     %0, cpsr        @ arch_local_irq_disable\n"
+               "       orr     %0, %0, #128\n"
+               "       msr     cpsr_c, %0"
+               : "=r" (temp)
+               :
+               : "memory", "cc");
+}
+
+/*
+ * Enable FIQs
+ */
+#define local_fiq_enable()                                     \
+       ({                                                      \
+               unsigned long temp;                             \
+       __asm__ __volatile__(                                   \
+       "mrs    %0, cpsr                @ stf\n"                \
+"      bic     %0, %0, #64\n"                                  \
+"      msr     cpsr_c, %0"                                     \
+       : "=r" (temp)                                           \
+       :                                                       \
+       : "memory", "cc");                                      \
+       })
+
+/*
+ * Disable FIQs
+ */
+#define local_fiq_disable()                                    \
+       ({                                                      \
+               unsigned long temp;                             \
+       __asm__ __volatile__(                                   \
+       "mrs    %0, cpsr                @ clf\n"                \
+"      orr     %0, %0, #64\n"                                  \
+"      msr     cpsr_c, %0"                                     \
+       : "=r" (temp)                                           \
+       :                                                       \
+       : "memory", "cc");                                      \
+       })
+
+#define local_abt_enable()     do { } while (0)
+#define local_abt_disable()    do { } while (0)
+#endif
+
+/*
+ * Save the current interrupt enable state.
+ */
+#define arch_local_save_flags arch_local_save_flags
+static inline unsigned long arch_local_save_flags(void)
+{
+       unsigned long flags;
+
+       asm volatile(
+               "mrs    %0, " IRQMASK_REG_NAME_R "      @ local_save_flags"
+               : "=r" (flags) : : "memory", "cc");
+       return flags;
+}
+
+/*
+ * restore saved IRQ & FIQ state
+ */
+#define arch_local_irq_restore arch_local_irq_restore
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+       irq_trace_stop(flags);
+       asm volatile(
+               "msr    " IRQMASK_REG_NAME_W ", %0      @ local_irq_restore"
+               :
+               : "r" (flags)
+               : "memory", "cc");
+}
+
+#define arch_irqs_disabled_flags arch_irqs_disabled_flags
+static inline int arch_irqs_disabled_flags(unsigned long flags)
+{
+       return flags & IRQMASK_I_BIT;
+}
+
+#include <asm-generic/irqflags.h>
+
+#endif /* ifdef __KERNEL__ */
+#endif /* ifndef __ASM_IRQFLAGS_DEBUG_ARM_H */
similarity index 95%
rename from drivers/amlogic/debug/irqflags_debug.h
rename to drivers/amlogic/debug/irqflags_debug_arm64.h
index 3e06fa7..4074b78 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * drivers/amlogic/debug/irqflags_debug.h
+ * drivers/amlogic/debug/irqflags_debug_arm64.h
  *
  * Copyright (C) 2017 Amlogic, Inc. All rights reserved.
  *
@@ -15,8 +15,8 @@
  *
  */
 
-#ifndef __ASM_IRQFLAGS_DEBUG_H
-#define __ASM_IRQFLAGS_DEBUG_H
+#ifndef __ASM_IRQFLAGS_DEBUG_ARM64_H
+#define __ASM_IRQFLAGS_DEBUG_ARM64_H
 
 #ifdef __KERNEL__
 
index 25f746b..353e71c 100644 (file)
@@ -6,10 +6,13 @@ void irq_trace_stop(unsigned long flag);
 void irq_trace_start(unsigned long flag);
 void pr_lockup_info(int cpu);
 void lockup_hook(int cpu);
-void isr_in_hook(unsigned int c, unsigned long *t, unsigned int i, void *a);
-void isr_out_hook(unsigned int cpu, unsigned long tin, unsigned int irq);
+void isr_in_hook(unsigned int c, unsigned long long *t,
+       unsigned int i, void *a);
+void isr_out_hook(unsigned int cpu, unsigned long long tin, unsigned int irq);
 void irq_trace_en(int en);
-void sirq_in_hook(unsigned int cpu, unsigned long *tin, void *p);
-void sirq_out_hook(unsigned int cpu, unsigned long tin, void *p);
+void sirq_in_hook(unsigned int cpu, unsigned long long *tin, void *p);
+void sirq_out_hook(unsigned int cpu, unsigned long long tin, void *p);
 void aml_wdt_disable_dbg(void);
+void  notrace __arch_cpu_idle_enter(void);
+void  notrace __arch_cpu_idle_exit(void);
 #endif
index 0b4c21c..591ffe1 100644 (file)
@@ -766,7 +766,7 @@ void handle_percpu_devid_irq(struct irq_desc *desc)
        if (likely(action)) {
 
 #ifdef CONFIG_AMLOGIC_DEBUG_LOCKUP
-               unsigned long tin;
+               unsigned long long tin;
                unsigned int cpu = smp_processor_id();
 
                isr_in_hook(cpu, &tin, irq, action->handler);
index 063553f..95a322a 100644 (file)
@@ -142,7 +142,7 @@ irqreturn_t __handle_irq_event_percpu(struct irq_desc *desc, unsigned int *flags
                irqreturn_t res;
 
 #ifdef CONFIG_AMLOGIC_DEBUG_LOCKUP
-               unsigned long tin;
+               unsigned long long tin;
                unsigned int cpu = smp_processor_id();
 
                isr_in_hook(cpu, &tin, irq, action->handler);
index 9c672ba..44f0f5c 100644 (file)
@@ -255,7 +255,7 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
        int softirq_bit;
 #ifdef CONFIG_AMLOGIC_DEBUG_LOCKUP
        int cpu;
-       unsigned long tin;
+       unsigned long long tin;
 #endif
 
        /*