From 91d7a0007aa40ad08fe8a642bb2b1525c448e729 Mon Sep 17 00:00:00 2001
From: "jianxin.pan"
Date: Sat, 20 Jan 2018 15:24:50 +0800
Subject: [PATCH] debug: hard lockup detect

PD#165796: detect lockups in ISRs and deadlocks with IRQs disabled

Change-Id: I0aace28fd29e5aeb8c514f6ab1700ac1248f836c
Signed-off-by: jianxin.pan
---
 MAINTAINERS                            |   3 +
 arch/arm64/configs/meson64_defconfig   |   2 +
 arch/arm64/include/asm/irqflags.h      |   4 +
 drivers/amlogic/Kconfig                |   1 +
 drivers/amlogic/Makefile               |   2 +
 drivers/amlogic/debug/Kconfig          |  15 ++
 drivers/amlogic/debug/Makefile         |   1 +
 drivers/amlogic/debug/debug_lockup.c   | 356 +++++++++++++++++++++++++++++++++
 drivers/amlogic/debug/irqflags_debug.h | 125 ++++++++++++
 drivers/amlogic/watchdog/meson_wdt.c   |  23 ++-
 include/linux/amlogic/debug_lockup.h   |  15 ++
 kernel/irq/chip.c                      |  10 +
 kernel/irq/handle.c                    |   9 +
 kernel/softirq.c                       |  11 +
 kernel/sysctl.c                        |   2 +-
 kernel/watchdog.c                      |  18 ++
 kernel/watchdog_hld.c                  |   6 +
 17 files changed, 600 insertions(+), 3 deletions(-)
 create mode 100644 drivers/amlogic/debug/Kconfig
 create mode 100644 drivers/amlogic/debug/Makefile
 create mode 100644 drivers/amlogic/debug/debug_lockup.c
 create mode 100644 drivers/amlogic/debug/irqflags_debug.h
 create mode 100644 include/linux/amlogic/debug_lockup.h

diff --git a/MAINTAINERS b/MAINTAINERS
index b35f547..a818a8a 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -14446,3 +14446,6 @@ M: Jihong Sui
 F: include/uapi/linux/dvb/aml_demod.h
 F: drivers/amlogic/media/aml_demod/*
 
+AMLOGIC DEBUG
+M: Jianxin Pan
+F: drivers/amlogic/debug/
diff --git a/arch/arm64/configs/meson64_defconfig b/arch/arm64/configs/meson64_defconfig
index 2634bb1..26b4bdc 100644
--- a/arch/arm64/configs/meson64_defconfig
+++ b/arch/arm64/configs/meson64_defconfig
@@ -345,6 +345,8 @@ CONFIG_AMLOGIC_DDR_BANDWIDTH=y
 CONFIG_AMLOGIC_TEE=y
 CONFIG_AMLOGIC_GPIO_IRQ=y
 CONFIG_AMLOGIC_ATV_DEMOD=y
+CONFIG_AMLOGIC_DEBUG=y
+CONFIG_AMLOGIC_DEBUG_LOCKUP=y
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h
index 8c581281..81db6c9 100644
--- a/arch/arm64/include/asm/irqflags.h
+++ b/arch/arm64/include/asm/irqflags.h
@@ -20,6 +20,9 @@
 
 #include
 
+#ifdef CONFIG_AMLOGIC_DEBUG_LOCKUP
+#include <../drivers/amlogic/debug/irqflags_debug.h>
+#else
 /*
  * CPU interrupt mask handling.
  */
@@ -112,3 +115,4 @@ static inline int arch_irqs_disabled_flags(unsigned long flags)
 
 #endif
 #endif
+#endif
diff --git a/drivers/amlogic/Kconfig b/drivers/amlogic/Kconfig
index 01a6155..1c66995 100644
--- a/drivers/amlogic/Kconfig
+++ b/drivers/amlogic/Kconfig
@@ -128,5 +128,6 @@ source "drivers/amlogic/irqchip/Kconfig"
 
 source "drivers/amlogic/atv_demod/Kconfig"
 
+source "drivers/amlogic/debug/Kconfig"
 endmenu
 endif
diff --git a/drivers/amlogic/Makefile b/drivers/amlogic/Makefile
index 1979244..fcc0523 100644
--- a/drivers/amlogic/Makefile
+++ b/drivers/amlogic/Makefile
@@ -118,3 +118,5 @@ obj-$(CONFIG_AMLOGIC_LEDRING) += ledring/
 obj-$(CONFIG_AMLOGIC_GPIO_IRQ) += irqchip/
 
 obj-$(CONFIG_AMLOGIC_ATV_DEMOD) += atv_demod/
+
+obj-$(CONFIG_AMLOGIC_DEBUG) += debug/
diff --git a/drivers/amlogic/debug/Kconfig b/drivers/amlogic/debug/Kconfig
new file mode 100644
index 0000000..34af209
--- /dev/null
+++ b/drivers/amlogic/debug/Kconfig
@@ -0,0 +1,15 @@
+menuconfig AMLOGIC_DEBUG
+	bool "AMLOGIC kernel debug"
+	default n
+	help
+	  Amlogic kernel debug
+
+config AMLOGIC_DEBUG_LOCKUP
+	bool "Amlogic Kernel Lockup debug"
+	depends on AMLOGIC_DEBUG
+	depends on HARDLOCKUP_DETECTOR
+	default n
+	help
+	  Debug lockups in ISRs and deadlocks with IRQs disabled.
+	  When this config is enabled, the hardware watchdog should be disabled.
+
diff --git a/drivers/amlogic/debug/Makefile b/drivers/amlogic/debug/Makefile
new file mode 100644
index 0000000..2f07bf9
--- /dev/null
+++ b/drivers/amlogic/debug/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_AMLOGIC_DEBUG_LOCKUP) += debug_lockup.o
diff --git a/drivers/amlogic/debug/debug_lockup.c b/drivers/amlogic/debug/debug_lockup.c
new file mode 100644
index 0000000..b4b29ae
--- /dev/null
+++ b/drivers/amlogic/debug/debug_lockup.c
@@ -0,0 +1,356 @@
+/*
+ * drivers/amlogic/debug/debug_lockup.c
+ *
+ * Copyright (C) 2017 Amlogic, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include "../kernel/sched/sched.h"
+#include
+#include
+#include
+#include
+
+/*isr trace*/
+#define ns2ms		(1000 * 1000)
+#define LONG_ISR	(500 * ns2ms)
+#define LONG_SIRQ	(500 * ns2ms)
+#define CHK_WINDOW	(1000 * ns2ms)
+#define IRQ_CNT		256
+#define CCCNT_WARN	1000
+#define CPU		8
+static unsigned long t_base[IRQ_CNT];
+static unsigned long t_isr[IRQ_CNT];
+static unsigned long t_total[IRQ_CNT];
+static unsigned int cnt_total[IRQ_CNT];
+static void *cpu_action[CPU];
+static int cpu_irq[CPU] = {0};
+static void *cpu_sirq[CPU] = {NULL};
+
+/*irq disable trace*/
+#define LONG_IRQDIS	(1000 * 1000000)	/*1000 ms*/
+#define OUT_WIN		(500 * 1000000)		/*500 ms*/
+#define LONG_IDLE	(5000000000)		/*5 sec*/
+#define ENTRY		10
+static unsigned long t_i_d[CPU];
+static int irq_flg;
+static struct stack_trace irq_trace[CPU];
+static unsigned long t_entrys[CPU][ENTRY];
+static unsigned long t_idle[CPU] = { 0 };
+static unsigned long t_d_out;
+
+static unsigned long isr_thr = LONG_ISR;
+core_param(isr_thr, isr_thr, ulong, 0644);
+static unsigned long irq_dis_thr = LONG_IRQDIS;
+core_param(irq_dis_thr, irq_dis_thr, ulong, 0644);
+static unsigned long sirq_thr = LONG_SIRQ;
+core_param(sirq_thr, sirq_thr, ulong, 0644);
+static int irq_check_en = 1;
+core_param(irq_check_en, irq_check_en, int, 0644);
+static int isr_check_en = 1;
+core_param(isr_check_en, isr_check_en, int, 0644);
+static unsigned long out_thr = OUT_WIN;
+core_param(out_thr, out_thr, ulong, 0644);
+
+void notrace isr_in_hook(unsigned int cpu, unsigned long *tin,
+			 unsigned int irq, void *act)
+{
+	if (irq >= IRQ_CNT || !isr_check_en || oops_in_progress)
+		return;
+	cpu_irq[cpu] = irq;
+	cpu_action[cpu] = act;
+	*tin = sched_clock();
+	if (*tin >= CHK_WINDOW + t_base[irq]) {
+		t_base[irq] = *tin;
+		t_isr[irq] = 0;
+		t_total[irq] = 0;
+		cnt_total[irq] = 0;
+	}
+}
+
+void notrace isr_out_hook(unsigned int cpu, unsigned long tin, unsigned int irq)
+{
+	unsigned long tout;
+	unsigned int ratio = 0;
+
+	if (!isr_check_en || oops_in_progress)
+		return;
+	if (irq >= IRQ_CNT || cpu_irq[cpu] <= 0)
+		return;
+	cpu_irq[cpu] = 0;
+	tout = sched_clock();
+	t_isr[irq] += (tout > tin) ? (tout - tin) : 0;
+	t_total[irq] = (tout - t_base[irq]);
+	cnt_total[irq]++;
+
+	if (tout > isr_thr + tin)
+		pr_err("ISR_Long___ERR. irq:%d tout-tin:%ld ms\n",
+		       irq, (tout - tin) / ns2ms);
+
+	if (t_total[irq] < CHK_WINDOW)
+		return;
+
+	ratio = t_isr[irq] * 100 / t_total[irq];
+	if (ratio >= 35) {
+		pr_err("IRQRatio___ERR.irq:%d ratio:%d\n", irq, (int)ratio);
+		pr_err("t_isr:%d t_total:%d, cnt:%d\n",
+		       (int)(t_isr[irq] / ns2ms),
+		       (int)(t_total[irq] / ns2ms),
+		       cnt_total[irq]);
+	}
+	t_base[irq] = sched_clock();
+	t_isr[irq] = 0;
+	t_total[irq] = 0;
+	cnt_total[irq] = 0;
+	cpu_action[cpu] = NULL;
+}
+
+void notrace sirq_in_hook(unsigned int cpu, unsigned long *tin, void *p)
+{
+	cpu_sirq[cpu] = p;
+	*tin = sched_clock();
+}
+void notrace sirq_out_hook(unsigned int cpu, unsigned long tin, void *p)
+{
+	unsigned long tout = sched_clock();
+
+	if (cpu_sirq[cpu] && tin && (tout > tin + sirq_thr) &&
+	    !oops_in_progress) {
+		pr_err("SIRQLong___ERR. sirq:%p tout-tin:%ld ms\n",
+		       p, (tout - tin) / ns2ms);
+	}
+	cpu_sirq[cpu] = NULL;
+}
+
+void notrace irq_trace_start(unsigned long flags)
+{
+	unsigned int cpu;
+	int softirq = 0;
+
+	if (!irq_flg || !irq_check_en || oops_in_progress)
+		return;
+	if (arch_irqs_disabled_flags(flags))
+		return;
+
+	cpu = get_cpu();
+	put_cpu();
+	softirq = task_thread_info(current)->preempt_count & SOFTIRQ_MASK;
+	if ((t_idle[cpu] && !softirq) || t_i_d[cpu] || cpu_is_offline(cpu) ||
+	    (softirq_count() && !cpu_sirq[cpu]))
+		return;
+
+	memset(&irq_trace[cpu], 0, sizeof(irq_trace[cpu]));
+	memset(&t_entrys[cpu][0], 0, sizeof(t_entrys[cpu][0])*ENTRY);
+	irq_trace[cpu].entries = &t_entrys[cpu][0];
+	irq_trace[cpu].max_entries = ENTRY;
+	irq_trace[cpu].skip = 2;
+	irq_trace[cpu].nr_entries = 0;
+	t_i_d[cpu] = sched_clock();
+	save_stack_trace(&irq_trace[cpu]);
+}
+EXPORT_SYMBOL(irq_trace_start);
+
+void notrace irq_trace_stop(unsigned long flag)
+{
+	unsigned long t_i_e, t;
+	unsigned int cpu;
+	int softirq = 0;
+	static int out_cnt;
+
+	if (!irq_check_en || !irq_flg || oops_in_progress)
+		return;
+
+	if (arch_irqs_disabled_flags(flag))
+		return;
+
+	cpu = get_cpu();
+	put_cpu();
+	if (!t_i_d[cpu] ||
+	    !arch_irqs_disabled_flags(arch_local_save_flags())) {
+		t_i_d[cpu] = 0;
+		return;
+	}
+
+	t_i_e = sched_clock();
+	if (t_i_e < t_i_d[cpu]) {
+		t_i_d[cpu] = 0;
+		return;
+	}
+
+	t = (t_i_e - t_i_d[cpu]);
+	softirq = task_thread_info(current)->preempt_count & SOFTIRQ_MASK;
+
+	if (!(t_idle[cpu] && !softirq) && (t > irq_dis_thr) && t_i_d[cpu] &&
+	    !(softirq_count() && !cpu_sirq[cpu])) {
+		out_cnt++;
+		if (t_i_e >= t_d_out + out_thr) {
+			t_d_out = t_i_e;
+			pr_err("\n\nDisIRQ___ERR:%ld ms <%ld %ld> %d:\n",
+			       t / ns2ms, t_i_e / ns2ms, t_i_d[cpu] / ns2ms,
+			       out_cnt);
+			print_stack_trace(&irq_trace[cpu], 0);
+			dump_stack();
+		}
+	}
+	t_i_d[cpu] = 0;
+}
+EXPORT_SYMBOL(irq_trace_stop);
+
+void __attribute__((weak)) lockup_hook(int cpu)
+{
+}
+
+void notrace arch_cpu_idle_enter(void)
+{
+	int cpu;
+
+	if ((!irq_check_en || !irq_flg) && !isr_check_en)
+		return;
+	cpu = get_cpu();
+	put_cpu();
+	t_idle[cpu] = local_clock();
+}
+void notrace arch_cpu_idle_exit(void)
+{
+	int cpu;
+
+	if ((!irq_check_en || !irq_flg) && !isr_check_en)
+		return;
+	cpu = get_cpu();
+	put_cpu();
+	t_idle[cpu] = 0;
+}
+
+void pr_lockup_info(int c)
+{
+	int cpu;
+	int virq = irq_check_en;
+	int visr = isr_check_en;
+
+	irq_flg = 0;
+	irq_check_en = 0;
+	isr_check_en = 0;
+	console_loglevel = 7;
+	pr_err("\n\n\nHARDLOCKUP____ERR.CPU[%d] START irq_check_en:%d isr_check_en:%d\n",
+	       c, virq, visr);
+	for_each_online_cpu(cpu) {
+		unsigned long t_cur = sched_clock();
+		struct task_struct *p = (cpu_rq(cpu)->curr);
+		int preempt = task_thread_info(p)->preempt_count;
+
+		pr_err("\ndump_cpu[%d] irq:%3d preempt:%x %s\n",
+		       cpu, cpu_irq[cpu], preempt, p->comm);
+		if (preempt & HARDIRQ_MASK)
+			pr_err("IRQ %pf, %x\n", cpu_action[cpu],
+			       (unsigned int)(preempt & HARDIRQ_MASK));
+		if (preempt & SOFTIRQ_MASK)
+			pr_err("SoftIRQ %pf, %x\n", cpu_sirq[cpu],
+			       (unsigned int)(preempt & SOFTIRQ_MASK));
+
+		if (t_i_d[cpu]) {
+			pr_err("IRQ____ERR[%d]. <%ld %ld>.\n",
+			       cpu, t_i_d[cpu] / ns2ms,
+			       (t_cur - t_i_d[cpu]) / ns2ms);
+			print_stack_trace(&irq_trace[cpu], 0);
+		}
+		if (t_idle[cpu] && (t_cur > LONG_IDLE + t_idle[cpu])) {
+			pr_err("CPU[%d] IdleLong____ERR:%ld ms <%ld %ld>\n",
+			       cpu, (t_cur - t_idle[cpu]) / ns2ms,
+			       t_cur / ns2ms, t_idle[cpu] / ns2ms);
+		}
+		dump_cpu_task(cpu);
+		lockup_hook(cpu);
+	}
+	pr_err("\nHARDLOCKUP____ERR.END\n\n");
+}
+
+
+static struct dentry *debug_lockup;
+#define debug_fs(x)							\
+static ssize_t x##_write(struct file *file, const char __user *userbuf,\
+			 size_t count, loff_t *ppos)			\
+{									\
+	char buf[20];							\
+	unsigned long val;						\
+	int ret;							\
+	count = min_t(size_t, count, (sizeof(buf)-1));			\
+	if (copy_from_user(buf, userbuf, count))			\
+		return -EFAULT;						\
+	buf[count] = 0;							\
+	ret = sscanf(buf, "%ld", &val);					\
+	x = (typeof(x))val;						\
+	if (irq_check_en || isr_check_en)				\
+		aml_wdt_disable_dbg();					\
+	pr_info("%s:%ld\n", __func__, (unsigned long)x);		\
+	return count;							\
+}									\
+static ssize_t x##_read(struct file *file, char __user *userbuf,	\
+			size_t count, loff_t *ppos)			\
+{									\
+	char buf[20];							\
+	unsigned long val;						\
+	ssize_t len;							\
+	val = (unsigned long)x;						\
+	len = snprintf(buf, sizeof(buf), "%ld\n", val);			\
+	pr_info("%s:%ld\n", __func__, val);				\
+	return simple_read_from_buffer(userbuf, count, ppos, buf, len);	\
+}									\
+static const struct file_operations x##_debug_ops = {			\
+	.open = simple_open,						\
+	.read = x##_read,						\
+	.write = x##_write,						\
+}
+debug_fs(isr_thr);
+debug_fs(irq_dis_thr);
+debug_fs(sirq_thr);
+debug_fs(irq_check_en);
+debug_fs(isr_check_en);
+debug_fs(out_thr);
+
+static int __init debug_lockup_init(void)
+{
+	debug_lockup = debugfs_create_dir("lockup", NULL);
+	if (IS_ERR_OR_NULL(debug_lockup)) {
+		pr_warn("failed to create debug_lockup\n");
+		debug_lockup = NULL;
+		return -1;
+	}
+	debugfs_create_file("isr_thr", S_IFREG | 0664,
+			    debug_lockup, NULL, &isr_thr_debug_ops);
+	debugfs_create_file("irq_dis_thr", S_IFREG | 0664,
+			    debug_lockup, NULL, &irq_dis_thr_debug_ops);
+	debugfs_create_file("sirq_thr", S_IFREG | 0664,
+			    debug_lockup, NULL, &sirq_thr_debug_ops);
+	debugfs_create_file("out_thr", S_IFREG | 0664,
+			    debug_lockup, NULL, &out_thr_debug_ops);
+	debugfs_create_file("isr_check_en", S_IFREG | 0664,
+			    debug_lockup, NULL, &isr_check_en_debug_ops);
+	debugfs_create_file("irq_check_en", S_IFREG | 0664,
+			    debug_lockup, NULL, &irq_check_en_debug_ops);
+	if (irq_check_en || isr_check_en)
+		aml_wdt_disable_dbg();
+	irq_flg = 1;
+	return 0;
+}
+late_initcall(debug_lockup_init);
+
+MODULE_DESCRIPTION("Amlogic debug lockup module");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jianxin Pan ");
diff --git a/drivers/amlogic/debug/irqflags_debug.h b/drivers/amlogic/debug/irqflags_debug.h
new file mode 100644
index 0000000..3e06fa7
--- /dev/null
+++ b/drivers/amlogic/debug/irqflags_debug.h
@@ -0,0 +1,125 @@
+/*
+ * drivers/amlogic/debug/irqflags_debug.h
+ *
+ * Copyright (C) 2017 Amlogic, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#ifndef __ASM_IRQFLAGS_DEBUG_H
+#define __ASM_IRQFLAGS_DEBUG_H
+
+#ifdef __KERNEL__
+
+/*
+ * CPU interrupt mask handling.
+ */
+#include
+
+static inline unsigned long arch_local_irq_save(void)
+{
+	unsigned long flags;
+
+	asm volatile(
+		"mrs	%0, daif		// arch_local_irq_save\n"
+		"msr	daifset, #2"
+		: "=r" (flags)
+		:
+		: "memory");
+	irq_trace_start(flags);
+	return flags;
+}
+
+static inline void arch_local_irq_enable(void)
+{
+	irq_trace_stop(0);
+
+	asm volatile(
+		"msr	daifclr, #2		// arch_local_irq_enable"
+		:
+		:
+		: "memory");
+}
+
+static inline void arch_local_irq_disable(void)
+{
+#if 0
+	asm volatile(
+		"msr	daifset, #2		// arch_local_irq_disable"
+		:
+		:
+		: "memory");
+#endif
+	arch_local_irq_save();
+}
+
+#define local_fiq_enable()	asm("msr	daifclr, #1" : : : "memory")
+#define local_fiq_disable()	asm("msr	daifset, #1" : : : "memory")
+
+#define local_async_enable()	asm("msr	daifclr, #4" : : : "memory")
+#define local_async_disable()	asm("msr	daifset, #4" : : : "memory")
+
+/*
+ * Save the current interrupt enable state.
+ */
+static inline unsigned long arch_local_save_flags(void)
+{
+	unsigned long flags;
+
+	asm volatile(
+		"mrs	%0, daif		// arch_local_save_flags"
+		: "=r" (flags)
+		:
+		: "memory");
+	return flags;
+}
+
+/*
+ * restore saved IRQ state
+ */
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+	irq_trace_stop(flags);
+	asm volatile(
+		"msr	daif, %0		// arch_local_irq_restore"
+		:
+		: "r" (flags)
+		: "memory");
+}
+
+static inline int arch_irqs_disabled_flags(unsigned long flags)
+{
+	return flags & PSR_I_BIT;
+}
+
+/*
+ * save and restore debug state
+ */
+#define local_dbg_save(flags)						\
+	do {								\
+		typecheck(unsigned long, flags);			\
+		asm volatile(						\
+			"mrs	%0, daif	// local_dbg_save\n"	\
+			"msr	daifset, #8"				\
+			: "=r" (flags) : : "memory");			\
+	} while (0)
+
+#define local_dbg_restore(flags)					\
+	do {								\
+		typecheck(unsigned long, flags);			\
+		asm volatile(						\
+			"msr	daif, %0	// local_dbg_restore\n"	\
+			: : "r" (flags) : "memory");			\
+	} while (0)
+
+#endif
+#endif
diff --git a/drivers/amlogic/watchdog/meson_wdt.c b/drivers/amlogic/watchdog/meson_wdt.c
index c119e66..4da2517 100644
--- a/drivers/amlogic/watchdog/meson_wdt.c
+++ b/drivers/amlogic/watchdog/meson_wdt.c
@@ -56,7 +56,6 @@ struct aml_wdt_dev {
 	struct notifier_block reboot_notifier;
 };
 
-
 static void aml_update_bits(void __iomem *reg, unsigned int mask,
 			    unsigned int val)
 {
@@ -295,6 +294,24 @@ static struct notifier_block aml_wdt_reboot_notifier = {
 	.notifier_call = aml_wtd_reboot_notify,
 };
 
+#ifdef CONFIG_AMLOGIC_DEBUG_LOCKUP
+/* HARDLOCKUP safe window: watchdog_thresh(10) * 2 / 5 * 3 * 2 = 24 seconds */
+#define HARDLOCKUP_WIN 30
+struct aml_wdt_dev *g_awdt;
+void aml_wdt_disable_dbg(void)
+{
+	static int flg;
+	int cnt;
+
+	if (!g_awdt || flg)
+		return;
+	cnt = readl(g_awdt->reg_base + TCNT) & 0xffff;
+	if (cnt < HARDLOCKUP_WIN * g_awdt->one_second)
+		cnt = HARDLOCKUP_WIN * g_awdt->one_second;
+	set_watchdog_cnt(g_awdt, cnt);
+}
+#endif
+
 static int aml_wdt_probe(struct platform_device *pdev)
 {
 	struct watchdog_device *aml_wdt;
@@ -340,7 +357,9 @@ static int aml_wdt_probe(struct platform_device *pdev)
 	register_pm_notifier(&wdev->pm_notifier);
 	register_reboot_notifier(&wdev->reboot_notifier);
 	dev_info(wdev->dev, "AML Watchdog Timer probed done\n");
-
+#ifdef CONFIG_AMLOGIC_DEBUG_LOCKUP
+	g_awdt = wdev;
+#endif
 	return 0;
 }
 
diff --git a/include/linux/amlogic/debug_lockup.h b/include/linux/amlogic/debug_lockup.h
new file mode 100644
index 0000000..25f746b
--- /dev/null
+++ b/include/linux/amlogic/debug_lockup.h
@@ -0,0 +1,15 @@
+#ifndef __debug_lockup_h_
+#define __debug_lockup_h_
+
+
+void irq_trace_stop(unsigned long flag);
+void irq_trace_start(unsigned long flag);
+void pr_lockup_info(int cpu);
+void lockup_hook(int cpu);
+void isr_in_hook(unsigned int c, unsigned long *t, unsigned int i, void *a);
+void isr_out_hook(unsigned int cpu, unsigned long tin, unsigned int irq);
+void irq_trace_en(int en);
+void sirq_in_hook(unsigned int cpu, unsigned long *tin, void *p);
+void sirq_out_hook(unsigned int cpu, unsigned long tin, void *p);
+void aml_wdt_disable_dbg(void);
+#endif
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index f30110e..0b4c21c 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -764,9 +764,19 @@ void handle_percpu_devid_irq(struct irq_desc *desc)
 		chip->irq_ack(&desc->irq_data);
 
 	if (likely(action)) {
+
+#ifdef CONFIG_AMLOGIC_DEBUG_LOCKUP
+		unsigned long tin;
+		unsigned int cpu = smp_processor_id();
+
+		isr_in_hook(cpu, &tin, irq, action->handler);
+#endif
 		trace_irq_handler_entry(irq, action);
 		res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
 		trace_irq_handler_exit(irq, action, res);
+#ifdef CONFIG_AMLOGIC_DEBUG_LOCKUP
+		isr_out_hook(cpu, tin, irq);
+#endif
 	} else {
 		unsigned int cpu = smp_processor_id();
 		bool enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index d3f2490..063553f 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -141,9 +141,18 @@ irqreturn_t __handle_irq_event_percpu(struct irq_desc *desc, unsigned int *flags
 	for_each_action_of_desc(desc, action) {
 		irqreturn_t res;
+#ifdef CONFIG_AMLOGIC_DEBUG_LOCKUP
+		unsigned long tin;
+		unsigned int cpu = smp_processor_id();
+
+		isr_in_hook(cpu, &tin, irq, action->handler);
+#endif
 
 		trace_irq_handler_entry(irq, action);
 		res = action->handler(irq, action->dev_id);
 		trace_irq_handler_exit(irq, action, res);
+#ifdef CONFIG_AMLOGIC_DEBUG_LOCKUP
+		isr_out_hook(cpu, tin, irq);
+#endif
 
 		if (WARN_ONCE(!irqs_disabled(),"irq %u handler %pF enabled interrupts\n",
 			      irq, action->handler))
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 744fa61..8c832fb 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -247,6 +247,10 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
 	bool in_hardirq;
 	__u32 pending;
 	int softirq_bit;
+#ifdef CONFIG_AMLOGIC_DEBUG_LOCKUP
+	int cpu;
+	unsigned long tin;
+#endif
 
 	/*
 	 * Mask out PF_MEMALLOC s current task context is borrowed for the
@@ -281,7 +285,14 @@ restart:
 		kstat_incr_softirqs_this_cpu(vec_nr);
 
 		trace_softirq_entry(vec_nr);
+#ifdef CONFIG_AMLOGIC_DEBUG_LOCKUP
+		cpu = smp_processor_id();
+		sirq_in_hook(cpu, &tin, (void *)h->action);
+#endif
 		h->action(h);
+#ifdef CONFIG_AMLOGIC_DEBUG_LOCKUP
+		sirq_out_hook(cpu, tin, (void *)h->action);
+#endif
 		trace_softirq_exit(vec_nr);
 		if (unlikely(prev_count != preempt_count())) {
 			pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 3d10a47..a21e2c4 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -969,7 +969,7 @@ static struct ctl_table kern_table[] = {
 		.extra1		= &zero,
 		.extra2		= &one,
 	},
-#ifdef CONFIG_HARDLOCKUP_DETECTOR
+#if defined(CONFIG_HARDLOCKUP_DETECTOR)
 	{
 		.procname	= "hardlockup_panic",
 		.data		= &hardlockup_panic,
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 0b3638c..9df5ac2 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -26,6 +26,8 @@
 #include
 #include
 
+#include
+
 static DEFINE_MUTEX(watchdog_proc_mutex);
 
 #if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR)
@@ -241,6 +243,22 @@ static void watchdog_interrupt_count(void)
 	__this_cpu_inc(hrtimer_interrupts);
 }
 
+unsigned long watchdog_get_interrupt_count_cpu(int cpu)
+{
+	return per_cpu(hrtimer_interrupts, cpu);
+}
+/*
+ * These two functions are mostly architecture specific,
+ * so define them as weak here.
+ */
+int __weak watchdog_nmi_enable(unsigned int cpu)
+{
+	return 0;
+}
+void __weak watchdog_nmi_disable(unsigned int cpu)
+{
+}
+
 static int watchdog_enable_all_cpus(void);
 static void watchdog_disable_all_cpus(void);
 
diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c
index ac528e1..9a42b8b 100644
--- a/kernel/watchdog_hld.c
+++ b/kernel/watchdog_hld.c
@@ -129,6 +129,9 @@ static void watchdog_overflow_callback(struct perf_event *event,
 		    !test_and_set_bit(0, &hardlockup_allcpu_dumped))
 			trigger_allbutself_cpu_backtrace();
 
+#ifdef CONFIG_AMLOGIC_DEBUG_LOCKUP
+		pr_lockup_info(next_cpu);
+#endif
 		if (hardlockup_panic)
 			nmi_panic(regs, "Hard LOCKUP");
 
@@ -198,6 +201,9 @@ void watchdog_check_hardlockup_other_cpu(void)
 		if (per_cpu(hard_watchdog_warn, next_cpu) == true)
 			return;
 
+#ifdef CONFIG_AMLOGIC_DEBUG_LOCKUP
+		pr_lockup_info(next_cpu);
+#endif
 		if (hardlockup_panic)
 			panic("Watchdog detected hard LOCKUP on cpu %u", next_cpu);
 
-- 
2.7.4
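
Usage note (not part of the patch): pr_lockup_info() ends each per-CPU dump by calling the weak lockup_hook() declared in include/linux/amlogic/debug_lockup.h, so platform code can override it to print extra board-specific state when a hard lockup is reported. A minimal sketch of such an override is below; it is illustrative only, and the message text is an arbitrary placeholder.

#include <linux/kernel.h>
#include <linux/amlogic/debug_lockup.h>

/* Illustrative override of the weak lockup_hook() from debug_lockup.c.
 * pr_lockup_info() calls this once per online CPU, after it has already
 * dumped that CPU's IRQ/softirq state and stack trace.
 */
void lockup_hook(int cpu)
{
	/* Print whatever platform-specific state helps explain the lockup. */
	pr_err("lockup_hook: extra debug state for CPU%d\n", cpu);
}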