6 #include <linux/errno.h>
7 #include <linux/ctype.h>
8 #include <linux/notifier.h>
9 #include <linux/reboot.h>
10 #include <linux/input.h>
11 #include <linux/delay.h>
12 #include <linux/sysrq.h>
13 #include <mach/regs-pmu.h>
14 #include <asm/cacheflush.h>
16 #include <linux/sched.h>
17 #include <linux/smp.h>
18 #include <linux/slab.h>
19 #include <linux/spinlock.h>
20 #include <linux/uaccess.h>
21 #include <linux/proc_fs.h>
23 #include <mach/system.h>
24 #include <mach/sec_debug.h>
/*
 * Reason codes for the last reset/upload.  The active cause is kept
 * per-CPU and mirrored into the S5P_INFORM3/4/6 PMU registers (see
 * sec_debug_set_upload_cause()) so the bootloader can read it after
 * a reset.
 * NOTE(review): the closing "};" of this enum is not visible in this
 * view — lines appear to be elided from the source.
 */
26 enum sec_debug_upload_cause_t {
27 UPLOAD_CAUSE_INIT = 0xCAFEBABE,
28 UPLOAD_CAUSE_KERNEL_PANIC = 0x000000C8,
29 UPLOAD_CAUSE_FORCED_UPLOAD = 0x00000022,
30 UPLOAD_CAUSE_CP_ERROR_FATAL = 0x000000CC,
31 UPLOAD_CAUSE_USER_FAULT = 0x0000002F,
32 UPLOAD_CAUSE_HSIC_DISCONNECTED = 0x000000DD,
/*
 * Snapshot of the CP15/MMU registers saved by sec_debug_save_mmu_reg().
 * NOTE(review): the field list is elided in this view; field offsets
 * must match the "str r1, [%0,#N]" offsets used in that function.
 */
35 struct sec_debug_mmu_reg_t {
56 /* ARM CORE regs mapping structure */
57 struct sec_debug_core_t {
/*
 * Banked registers captured per ARM processor mode by
 * sec_debug_save_core_reg().  Only the SVC/FIQ/IRQ/MON/ABT/UND tail of
 * the structure is visible here — r0-r12, usr/svc r13-r14, pc and cpsr
 * fields appear elided.  Offsets must match that function's asm.
 */
76 unsigned int spsr_svc;
94 unsigned int spsr_fiq;
99 unsigned int spsr_irq;
102 unsigned int r13_mon;
103 unsigned int r14_mon;
104 unsigned int spsr_mon;
107 unsigned int r13_abt;
108 unsigned int r14_abt;
109 unsigned int spsr_abt;
112 unsigned int r13_und;
113 unsigned int r14_und;
114 unsigned int spsr_und;
118 /* enable sec_debug feature */
/* "enable" gates the whole debug facility; "enable_user" additionally
 * allows dumps on user-space faults (see sec_user_fault_dump()).
 * Both are writable module parameters (0644). */
119 static unsigned enable = 1;
120 static unsigned enable_user = 0;
121 module_param_named(enable, enable, uint, 0644);
122 module_param_named(enable_user, enable_user, uint, 0644);
/* Kernel build date/time strings concatenated into gkernel_sec_build_info
 * by sec_debug_set_build_info().  Initializer contents are elided in
 * this view. */
124 static const char *gkernel_sec_build_info_date_time[] ={
129 static char gkernel_sec_build_info[100];
131 /* klaatu - schedule log */
132 #ifdef CONFIG_SEC_DEBUG_SCHED_LOG
/* Circular task/IRQ trace buffer shared by all CPUs.  The index starts
 * at -1 so the first atomic_inc_return() yields slot 0.  Indexing uses
 * "& (SCHED_LOG_MAX - 1)", which assumes SCHED_LOG_MAX is a power of
 * two — TODO confirm in the header that defines it. */
133 static struct sched_log gExcpTaskLog[SCHED_LOG_MAX] __cacheline_aligned;
134 static atomic_t gExcpTaskLogIdx = ATOMIC_INIT(-1);
136 /* On hardlockup case, CPU1's log might flood out CPU0's logs. So
137 let's keep last_task/last_irq at least. Skip PER_CPU FORCED_UPLOAD
138 tracing convenience */
/* Indexed with (cpu & 1), i.e. sized for a dual-core system. */
139 struct sched_log last_task[2];
140 struct sched_log last_irq[2];
143 /* klaatu - semaphore log */
144 #ifdef CONFIG_SEC_DEBUG_SEMAPHORE_LOG
/* Free/in-use list heads and counters for the semaphore ownership log;
 * protected by sem_debug_lock. */
145 struct sem_debug sem_debug_free_head;
146 struct sem_debug sem_debug_done_head;
147 int sem_debug_free_head_cnt;
148 int sem_debug_done_head_cnt;
149 int sem_debug_init = 0;
150 spinlock_t sem_debug_lock;
152 /* rwsemaphore logging */
/* Same scheme for rw-semaphores; protected by rwsem_debug_lock. */
153 struct rwsem_debug rwsem_debug_free_head;
154 struct rwsem_debug rwsem_debug_done_head;
155 int rwsem_debug_free_head_cnt;
156 int rwsem_debug_done_head_cnt;
157 int rwsem_debug_init = 0;
158 spinlock_t rwsem_debug_lock;
160 #endif /* CONFIG_SEC_DEBUG_SEMAPHORE_LOG */
/* Per-CPU register snapshots and last upload cause, filled in on
 * panic/dump by sec_debug_save_context(). */
162 DEFINE_PER_CPU(struct sec_debug_core_t, sec_debug_core_reg);
163 DEFINE_PER_CPU(struct sec_debug_mmu_reg_t, sec_debug_mmu_reg);
164 DEFINE_PER_CPU(enum sec_debug_upload_cause_t, sec_debug_upload_cause);
166 /* core reg dump function*/
/*
 * Dump the calling CPU's general-purpose and banked registers into
 * *core_reg.  Entered in SVC mode; the asm computes a new CPSR for each
 * of SYS, FIQ, IRQ, MON, ABT and UND modes to capture the banked
 * r13/r14 (and SPSR where present), then restores SVC mode (0x13).
 * Store offsets must match struct sec_debug_core_t's layout.
 *
 * NOTE(review): the "msr cpsr, r1" instructions that should follow each
 * mode-bit computation (and the stores for the skipped offsets) are not
 * visible in this view — the source appears to have elided lines.
 * Confirm against the full file before modifying.
 * NOTE(review): r1 is clobbered before R1 could be saved accurately for
 * the non-SVC sections; only the initial "str r1" captures the caller's
 * R1.
 */
167 static void sec_debug_save_core_reg(struct sec_debug_core_t *core_reg)
169 /* we will be in SVC mode when we enter this function. Collect
170 SVC registers along with cmn registers. */
171 asm("str r0, [%0,#0]\n\t" /* R0 */
172 "str r1, [%0,#4]\n\t" /* R1 */
173 "str r2, [%0,#8]\n\t" /* R2 */
174 "str r3, [%0,#12]\n\t" /* R3 */
175 "str r4, [%0,#16]\n\t" /* R4 */
176 "str r5, [%0,#20]\n\t" /* R5 */
177 "str r6, [%0,#24]\n\t" /* R6 */
178 "str r7, [%0,#28]\n\t" /* R7 */
179 "str r8, [%0,#32]\n\t" /* R8 */
180 "str r9, [%0,#36]\n\t" /* R9 */
181 "str r10, [%0,#40]\n\t" /* R10 */
182 "str r11, [%0,#44]\n\t" /* R11 */
183 "str r12, [%0,#48]\n\t" /* R12 */
185 "str r13, [%0,#52]\n\t" /* R13_SVC */
186 "str r14, [%0,#56]\n\t" /* R14_SVC */
187 "mrs r1, spsr\n\t" /* SPSR_SVC */
188 "str r1, [%0,#60]\n\t"
190 "sub r1, r15, #0x4\n\t" /* PC */
191 "str r1, [%0,#64]\n\t"
192 "mrs r1, cpsr\n\t" /* CPSR */
193 "str r1, [%0,#68]\n\t"
195 "mrs r1, cpsr\n\t" /* switch to SYS mode */
196 "and r1, r1, #0xFFFFFFE0\n\t"
197 "orr r1, r1, #0x1f\n\t"
199 "str r13, [%0,#72]\n\t" /* R13_USR */
200 "str r14, [%0,#76]\n\t" /* R14_USR */
202 "mrs r1, cpsr\n\t" /* switch to FIQ mode */
203 "and r1,r1,#0xFFFFFFE0\n\t"
204 "orr r1,r1,#0x11\n\t"
206 "str r8, [%0,#80]\n\t" /* R8_FIQ */
207 "str r9, [%0,#84]\n\t" /* R9_FIQ */
208 "str r10, [%0,#88]\n\t" /* R10_FIQ */
209 "str r11, [%0,#92]\n\t" /* R11_FIQ */
210 "str r12, [%0,#96]\n\t" /* R12_FIQ */
211 "str r13, [%0,#100]\n\t" /* R13_FIQ */
212 "str r14, [%0,#104]\n\t" /* R14_FIQ */
213 "mrs r1, spsr\n\t" /* SPSR_FIQ */
214 "str r1, [%0,#108]\n\t"
216 "mrs r1, cpsr\n\t" /* switch to IRQ mode */
217 "and r1, r1, #0xFFFFFFE0\n\t"
218 "orr r1, r1, #0x12\n\t"
220 "str r13, [%0,#112]\n\t" /* R13_IRQ */
221 "str r14, [%0,#116]\n\t" /* R14_IRQ */
222 "mrs r1, spsr\n\t" /* SPSR_IRQ */
223 "str r1, [%0,#120]\n\t"
225 "mrs r1, cpsr\n\t" /* switch to monitor mode */
226 "and r1, r1, #0xFFFFFFE0\n\t"
227 "orr r1, r1, #0x16\n\t"
229 "str r13, [%0,#124]\n\t" /* R13_MON */
230 "str r14, [%0,#128]\n\t" /* R14_MON */
231 "mrs r1, spsr\n\t" /* SPSR_MON */
232 "str r1, [%0,#132]\n\t"
234 "mrs r1, cpsr\n\t" /* switch to Abort mode */
235 "and r1, r1, #0xFFFFFFE0\n\t"
236 "orr r1, r1, #0x17\n\t"
238 "str r13, [%0,#136]\n\t" /* R13_ABT */
239 "str r14, [%0,#140]\n\t" /* R14_ABT */
240 "mrs r1, spsr\n\t" /* SPSR_ABT */
241 "str r1, [%0,#144]\n\t"
243 "mrs r1, cpsr\n\t" /* switch to undef mode */
244 "and r1, r1, #0xFFFFFFE0\n\t"
245 "orr r1, r1, #0x1B\n\t"
247 "str r13, [%0,#148]\n\t" /* R13_UND */
248 "str r14, [%0,#152]\n\t" /* R14_UND */
249 "mrs r1, spsr\n\t" /* SPSR_UND */
250 "str r1, [%0,#156]\n\t"
251 /* restore to SVC mode */
252 "mrs r1, cpsr\n\t" /* switch to undef mode */
253 "and r1, r1, #0xFFFFFFE0\n\t"
254 "orr r1, r1, #0x13\n\t"
255 "msr cpsr,r1\n\t" : /* output */
256 : "r"(core_reg) /* input */
257 : "%r1" /* clobbered register */
/*
 * Dump the CP15 MMU/fault-status registers of the calling CPU into
 * *mmu_reg via MRC reads.  Offsets #36/#40 (ADFSR/AIFSR per the
 * comment) are intentionally skipped.  Store offsets must match
 * struct sec_debug_mmu_reg_t.
 * NOTE(review): the "str r1, [%0,#0]" for SCTLR appears elided in this
 * view (only the MRC on the first line is visible) — confirm against
 * the full source.
 */
263 static void sec_debug_save_mmu_reg(struct sec_debug_mmu_reg_t *mmu_reg)
265 asm("mrc p15, 0, r1, c1, c0, 0\n\t" /* SCTLR */
267 "mrc p15, 0, r1, c2, c0, 0\n\t" /* TTBR0 */
268 "str r1, [%0,#4]\n\t"
269 "mrc p15, 0, r1, c2, c0,1\n\t" /* TTBR1 */
270 "str r1, [%0,#8]\n\t"
271 "mrc p15, 0, r1, c2, c0,2\n\t" /* TTBCR */
272 "str r1, [%0,#12]\n\t"
273 "mrc p15, 0, r1, c3, c0,0\n\t" /* DACR */
274 "str r1, [%0,#16]\n\t"
275 "mrc p15, 0, r1, c5, c0,0\n\t" /* DFSR */
276 "str r1, [%0,#20]\n\t"
277 "mrc p15, 0, r1, c6, c0,0\n\t" /* DFAR */
278 "str r1, [%0,#24]\n\t"
279 "mrc p15, 0, r1, c5, c0,1\n\t" /* IFSR */
280 "str r1, [%0,#28]\n\t"
281 "mrc p15, 0, r1, c6, c0,2\n\t" /* IFAR */
282 "str r1, [%0,#32]\n\t"
283 /* Don't populate DAFSR and RAFSR */
284 "mrc p15, 0, r1, c10, c2,0\n\t" /* PMRRR */
285 "str r1, [%0,#44]\n\t"
286 "mrc p15, 0, r1, c10, c2,1\n\t" /* NMRRR */
287 "str r1, [%0,#48]\n\t"
288 "mrc p15, 0, r1, c13, c0,0\n\t" /* FCSEPID */
289 "str r1, [%0,#52]\n\t"
290 "mrc p15, 0, r1, c13, c0,1\n\t" /* CONTEXT */
291 "str r1, [%0,#56]\n\t"
292 "mrc p15, 0, r1, c13, c0,2\n\t" /* URWTPID */
293 "str r1, [%0,#60]\n\t"
294 "mrc p15, 0, r1, c13, c0,3\n\t" /* UROTPID */
295 "str r1, [%0,#64]\n\t"
296 "mrc p15, 0, r1, c13, c0,4\n\t" /* POTPIDR */
297 "str r1, [%0,#68]\n\t" : /* output */
298 : "r"(mmu_reg) /* input */
299 : "%r1", "memory" /* clobbered register */
/*
 * Save this CPU's MMU and core register state into its per-CPU
 * snapshot areas, with local interrupts disabled so the dump is not
 * torn by an IRQ.
 * NOTE(review): the declaration of "flags" is not visible in this
 * view — presumably on an elided line.
 */
303 static void sec_debug_save_context(void)
306 local_irq_save(flags);
307 sec_debug_save_mmu_reg(&per_cpu(sec_debug_mmu_reg, smp_processor_id()));
308 sec_debug_save_core_reg(&per_cpu
309 (sec_debug_core_reg, smp_processor_id()));
311 pr_emerg("(%s) context saved(CPU:%d)\n", __func__, smp_processor_id());
312 local_irq_restore(flags);
/*
 * Write the upload magic word to the fixed (virtual) address
 * 0xc0000000 where the bootloader inspects it after reset.
 * NOTE(review): hard-coded address — assumes the kernel direct-map
 * base; confirm this matches the platform's memory layout.
 */
315 static void sec_debug_set_upload_magic(unsigned magic)
317 pr_emerg("(%s) %x\n", __func__, magic);
319 *(unsigned int *)0xc0000000 = magic;
/*
 * Reboot-notifier callback: clear the upload magic so a normal,
 * intentional reboot is not mistaken for a crash by the bootloader.
 * NOTE(review): the return statement is not visible in this view.
 */
326 static int sec_debug_normal_reboot_handler(struct notifier_block *nb,
327 unsigned long l, void *p)
329 sec_debug_set_upload_magic(0x0);
/*
 * Record why an upload/reset is happening: stored per-CPU and mirrored
 * into three PMU INFORM registers so it survives the reset.
 */
334 static void sec_debug_set_upload_cause(enum sec_debug_upload_cause_t type)
336 per_cpu(sec_debug_upload_cause, smp_processor_id()) = type;
338 /* to check VDD_ALIVE / XnRESET issue */
339 __raw_writel(type, S5P_INFORM3);
340 __raw_writel(type, S5P_INFORM4);
341 __raw_writel(type, S5P_INFORM6);
343 pr_emerg("(%s) %x\n", __func__, type);
/*
 * Announce build info and trigger a hardware reset.
 * NOTE(review): the actual reset sequence (cache flush / arch reset
 * call) is not visible in this view — elided lines.
 */
346 static void sec_debug_hw_reset(void)
348 pr_emerg("(%s) %s\n", __func__, gkernel_sec_build_info);
349 pr_emerg("(%s) rebooting...\n", __func__);
/*
 * Panic-notifier callback: set the upload magic, classify the panic
 * message into an upload cause (defaulting to KERNEL_PANIC), dump all
 * task states via sysrq 't', save register context, and hard-reset.
 * NOTE(review): assumes "buf" is the NUL-terminated panic string passed
 * by the panic notifier chain.  An early "if (!enable)" bail-out and
 * the final return appear to be on elided lines.
 */
360 static int sec_debug_panic_handler(struct notifier_block *nb,
361 unsigned long l, void *buf)
366 sec_debug_set_upload_magic(0x66262564);
368 if (!strcmp(buf, "User Fault"))
369 sec_debug_set_upload_cause(UPLOAD_CAUSE_USER_FAULT);
370 else if (!strcmp(buf, "Crash Key"))
371 sec_debug_set_upload_cause(UPLOAD_CAUSE_FORCED_UPLOAD);
372 else if (!strcmp(buf, "CP Crash"))
373 sec_debug_set_upload_cause(UPLOAD_CAUSE_CP_ERROR_FATAL);
374 else if (!strcmp(buf, "HSIC Disconnected"))
375 sec_debug_set_upload_cause(UPLOAD_CAUSE_HSIC_DISCONNECTED);
377 sec_debug_set_upload_cause(UPLOAD_CAUSE_KERNEL_PANIC);
379 handle_sysrq('t', NULL);
381 sec_debug_dump_stack();
382 sec_debug_hw_reset();
388 * Called from dump_stack()
389 * This function call does not necessarily mean that a fatal error
390 * had occurred. It may be just a warning.
/* Saves this CPU's register context; the L1 flush mentioned below and
 * the return statement are on lines elided from this view. */
392 int sec_debug_dump_stack(void)
397 sec_debug_save_context();
399 /* flush L1 from each core.
400 L2 will be flushed later before reset. */
/*
 * Key-event hook implementing the "crash key" combo: HOME down followed
 * by VOLUMEUP drives a small static state machine (timestamped with
 * jiffies) that can force an upload.
 * NOTE(review): the state transitions and the panic/upload trigger are
 * largely on elided lines; only the skeleton is visible here.
 */
406 void sec_debug_check_crash_key(unsigned int code, int value)
408 static enum { NONE, HOME_DOWN } state = NONE;
409 static unsigned long home_down_jiffies;
414 //pr_info("%s: %d %d\n", __func__, code, value);
416 if (code == KEY_HOME) {
419 home_down_jiffies = jiffies;
422 } else if (code == KEY_VOLUMEUP) {
424 if (state == HOME_DOWN) {
425 pr_err("%s: %u msec after home down\n",
427 jiffies_to_msecs(jiffies -
431 /* else do nothing */
/* Notifier registrations: normal-reboot hook clears the upload magic;
 * panic hook records the cause and forces a reset (registered in
 * sec_debug_init()). */
438 static struct notifier_block nb_reboot_block = {
439 .notifier_call = sec_debug_normal_reboot_handler
442 static struct notifier_block nb_panic_block = {
443 .notifier_call = sec_debug_panic_handler,
/*
 * Compose "Kernel Build Info : <date> <time>" into the fixed 100-byte
 * gkernel_sec_build_info buffer.
 * NOTE(review): sprintf/strncat here are unbounded with respect to the
 * destination; safe only because the pieces (20 + 12 + 9 bytes plus
 * whatever the elided lines append) stay under 100 — verify if the
 * format ever changes.  strncat length caps truncate the date/time
 * strings, they do not bound the destination.
 */
446 static void sec_debug_set_build_info(void)
448 char *p = gkernel_sec_build_info;
449 sprintf(p, "Kernel Build Info : ");
451 strncat(p, gkernel_sec_build_info_date_time[0],12);
453 strncat(p, gkernel_sec_build_info_date_time[1],9);
/*
 * One-time init: build the info string, arm the upload magic with the
 * INIT cause, and register the reboot and panic notifiers.
 * NOTE(review): an early "if (!enable)" bail-out and the return appear
 * to be on elided lines.
 */
456 __init int sec_debug_init(void)
461 sec_debug_set_build_info();
463 sec_debug_set_upload_magic(0x66262564);
464 sec_debug_set_upload_cause(UPLOAD_CAUSE_INIT);
466 register_reboot_notifier(&nb_reboot_block);
468 atomic_notifier_chain_register(&panic_notifier_list,
474 /* klaatu - schedule log */
475 #ifdef CONFIG_SEC_DEBUG_SCHED_LOG
/* Debug aid: walk super_blocks and BUG if a write_super pointer holds a
 * poison value.  Callers keep it commented out; iterates without the
 * sb lock, hence the name. */
476 void check_supers_nolock(void)
478 struct super_block *sb, *n;
480 list_for_each_entry_safe(sb, n, &super_blocks, s_list) {
481 BUG_ON((unsigned)sb->s_op->write_super == 0x1234);
/*
 * Record a task switch into the shared circular trace.  The index mask
 * assumes SCHED_LOG_MAX is a power of two.  strcpy of task->comm is
 * safe only because comm is a fixed, NUL-terminated kernel buffer.
 * Also mirrors the entry into last_task[cpu & 1] so a flooding CPU
 * cannot erase the other CPU's most recent entry.
 */
485 void sec_debug_task_sched_log(int cpu, struct task_struct *task)
487 unsigned i = atomic_inc_return(&gExcpTaskLogIdx) & (SCHED_LOG_MAX - 1);
488 gExcpTaskLog[i].time = cpu_clock(cpu);
489 strcpy(gExcpTaskLog[i].log.task.comm, task->comm);
490 gExcpTaskLog[i].log.task.pid = task->pid;
491 gExcpTaskLog[i].log.task.cpu = cpu;
493 memcpy(&last_task[cpu & 1], &gExcpTaskLog[i], sizeof(gExcpTaskLog[0]));
495 /* check_supers_nolock(); */
/*
 * Record an IRQ handler invocation (handler address + cpu) into the
 * same circular trace, mirrored into last_irq[cpu & 1].
 */
498 void sec_debug_irq_sched_log(void *fn)
500 unsigned i = atomic_inc_return(&gExcpTaskLogIdx) & (SCHED_LOG_MAX - 1);
501 int cpu = smp_processor_id();
502 gExcpTaskLog[i].time = cpu_clock(cpu);
503 gExcpTaskLog[i].log.irq.cpu = cpu;
504 gExcpTaskLog[i].log.irq.fn = (void *)fn;
506 memcpy(&last_irq[cpu & 1], &gExcpTaskLog[i], sizeof(gExcpTaskLog[0]));
508 /* check_supers_nolock(); */
510 #endif /* CONFIG_SEC_DEBUG_SCHED_LOG */
512 /* klaatu - semaphore log */
513 #ifdef CONFIG_SEC_DEBUG_SEMAPHORE_LOG
/*
 * Pre-allocate SEMAPHORE_LOG_MAX tracking nodes onto the free list.
 * NOTE(review): the kmalloc result appears unchecked before list_add
 * (any NULL check would be on elided lines); GFP_KERNEL allocation in
 * a loop, init-time only.
 */
514 void debug_semaphore_init(void)
517 struct sem_debug *sem_debug = NULL;
519 spin_lock_init(&sem_debug_lock);
520 sem_debug_free_head_cnt = 0;
521 sem_debug_done_head_cnt = 0;
523 /* initialize list head of sem_debug */
524 INIT_LIST_HEAD(&sem_debug_free_head.list);
525 INIT_LIST_HEAD(&sem_debug_done_head.list);
527 for (i = 0; i < SEMAPHORE_LOG_MAX ; i++) {
528 /* malloc semaphore */
529 sem_debug = kmalloc(sizeof(struct sem_debug), GFP_KERNEL);
531 list_add(&sem_debug->list, &sem_debug_free_head.list);
532 sem_debug_free_head_cnt++;
/*
 * On semaphore down: take one node off the free list, fill in the
 * current task/pid/cpu, and move it to the done (in-use) list, all
 * under sem_debug_lock.  Only the first free node is used; the
 * list_for_each is effectively "grab head, then break" (break on an
 * elided line).
 */
538 void debug_semaphore_down_log(struct semaphore *sem)
540 struct list_head *tmp;
541 struct sem_debug *sem_dbg;
547 spin_lock_irqsave(&sem_debug_lock, flags);
548 list_for_each(tmp, &sem_debug_free_head.list)
550 sem_dbg = list_entry(tmp, struct sem_debug, list);
551 sem_dbg->task = current;
553 /* strcpy(sem_dbg->comm,current->group_leader->comm); */
554 sem_dbg->pid = current->pid;
555 sem_dbg->cpu = smp_processor_id();
556 list_del(&sem_dbg->list);
557 list_add(&sem_dbg->list, &sem_debug_done_head.list);
558 sem_debug_free_head_cnt--;
559 sem_debug_done_head_cnt++;
562 spin_unlock_irqrestore(&sem_debug_lock, flags);
/*
 * On semaphore up: find the done-list node matching this semaphore and
 * the current pid, and return it to the free list, under
 * sem_debug_lock.
 */
565 void debug_semaphore_up_log(struct semaphore *sem)
567 struct list_head *tmp;
568 struct sem_debug *sem_dbg;
574 spin_lock_irqsave(&sem_debug_lock, flags);
575 list_for_each(tmp, &sem_debug_done_head.list)
577 sem_dbg = list_entry(tmp, struct sem_debug, list);
578 if (sem_dbg->sem == sem && sem_dbg->pid == current->pid) {
579 list_del(&sem_dbg->list);
580 list_add(&sem_dbg->list, &sem_debug_free_head.list);
581 sem_debug_free_head_cnt++;
582 sem_debug_done_head_cnt--;
586 spin_unlock_irqrestore(&sem_debug_lock, flags);
589 /* rwsemaphore logging */
/*
 * rw-semaphore variant of the logging above: pre-allocate
 * RWSEMAPHORE_LOG_MAX nodes and mark the facility initialized so the
 * down/up hooks (which check rwsem_debug_init) become active.
 * NOTE(review): kmalloc result appears unchecked, as in
 * debug_semaphore_init().
 */
590 void debug_rwsemaphore_init(void)
593 struct rwsem_debug *rwsem_debug = NULL;
595 spin_lock_init(&rwsem_debug_lock);
596 rwsem_debug_free_head_cnt = 0;
597 rwsem_debug_done_head_cnt = 0;
599 /* initialize list head of sem_debug */
600 INIT_LIST_HEAD(&rwsem_debug_free_head.list);
601 INIT_LIST_HEAD(&rwsem_debug_done_head.list);
603 for (i = 0; i < RWSEMAPHORE_LOG_MAX ; i++) {
604 /* malloc semaphore */
605 rwsem_debug = kmalloc(sizeof(struct rwsem_debug), GFP_KERNEL);
607 list_add(&rwsem_debug->list, &rwsem_debug_free_head.list);
608 rwsem_debug_free_head_cnt++;
611 rwsem_debug_init = 1;
/*
 * On rwsem down (dir records read vs write): move one free node to the
 * in-use list with task/pid/cpu/direction filled in, under
 * rwsem_debug_lock.  No-op until debug_rwsemaphore_init() has run.
 */
614 void debug_rwsemaphore_down_log(struct rw_semaphore *sem, int dir)
616 struct list_head *tmp;
617 struct rwsem_debug *sem_dbg;
620 if (!rwsem_debug_init)
623 spin_lock_irqsave(&rwsem_debug_lock, flags);
624 list_for_each(tmp, &rwsem_debug_free_head.list)
626 sem_dbg = list_entry(tmp, struct rwsem_debug, list);
627 sem_dbg->task = current;
629 /* strcpy(sem_dbg->comm,current->group_leader->comm); */
630 sem_dbg->pid = current->pid;
631 sem_dbg->cpu = smp_processor_id();
632 sem_dbg->direction = dir;
633 list_del(&sem_dbg->list);
634 list_add(&sem_dbg->list, &rwsem_debug_done_head.list);
635 rwsem_debug_free_head_cnt--;
636 rwsem_debug_done_head_cnt++;
639 spin_unlock_irqrestore(&rwsem_debug_lock, flags);
/*
 * On rwsem up: return the node matching this semaphore and the current
 * pid to the free list, under rwsem_debug_lock.
 */
642 void debug_rwsemaphore_up_log(struct rw_semaphore *sem)
644 struct list_head *tmp;
645 struct rwsem_debug *sem_dbg;
648 if (!rwsem_debug_init)
651 spin_lock_irqsave(&rwsem_debug_lock, flags);
652 list_for_each(tmp, &rwsem_debug_done_head.list)
654 sem_dbg = list_entry(tmp, struct rwsem_debug, list);
655 if (sem_dbg->sem == sem && sem_dbg->pid == current->pid) {
656 list_del(&sem_dbg->list);
657 list_add(&sem_dbg->list, &rwsem_debug_free_head.list);
658 rwsem_debug_free_head_cnt++;
659 rwsem_debug_done_head_cnt--;
663 spin_unlock_irqrestore(&rwsem_debug_lock, flags);
665 #endif /* CONFIG_SEC_DEBUG_SEMAPHORE_LOG */
667 #ifdef CONFIG_TARGET_LOCALE_KOR
/* KOR-locale-only: trigger a debug dump on a user-space fault, gated
 * on both the "enable" and "enable_user" module parameters. */
668 void sec_user_fault_dump(void)
670 if (enable == 1 && enable_user == 1)
/*
 * /proc/user_fault write handler: copy the user buffer in (bounded by
 * the local buf size, which is declared on an elided line) and trigger
 * a dump when it begins with "dump_user_fault".
 * NOTE(review): the error-return paths after the size/copy checks are
 * on elided lines.
 */
674 static int sec_user_fault_write(struct file *file, const char __user *buffer,
675 size_t count, loff_t *offs)
679 if (count > sizeof(buf) - 1)
681 if (copy_from_user(buf, buffer, count))
685 if (strncmp(buf, "dump_user_fault", 15) == 0)
686 sec_user_fault_dump();
/* Write-only proc interface; no read handler is provided. */
691 static const struct file_operations sec_user_fault_proc_fops = {
692 .write = sec_user_fault_write,
/* Create the /proc/user_fault entry (world-writable, S_IWUGO). */
695 static int __init sec_debug_user_fault_init(void)
697 struct proc_dir_entry *entry;
699 entry = proc_create("user_fault", S_IWUGO, NULL,
700 &sec_user_fault_proc_fops);
705 device_initcall(sec_debug_user_fault_init);