2 * Based on arch/arm/kernel/traps.c
4 * Copyright (C) 1995-2009 Russell King
5 * Copyright (C) 2012 ARM Ltd.
6 * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
21 #include <linux/signal.h>
22 #include <linux/personality.h>
23 #include <linux/kallsyms.h>
24 #include <linux/spinlock.h>
25 #include <linux/uaccess.h>
26 #include <linux/hardirq.h>
27 #include <linux/kdebug.h>
28 #include <linux/module.h>
29 #include <linux/kexec.h>
30 #include <linux/delay.h>
31 #include <linux/init.h>
32 #include <linux/sched.h>
33 #include <linux/syscalls.h>
35 #include <asm/atomic.h>
36 #include <asm/debug-monitors.h>
37 #include <asm/traps.h>
38 #include <asm/stacktrace.h>
39 #include <asm/exception.h>
40 #include <asm/system_misc.h>
/*
 * Names for the exception-vector classes; indexed by the 'reason'
 * argument of bad_mode() below (initializer entries elided from this view).
 */
42 static const char *handler[]= {
/* Non-zero => log unhandled user faults (undef instr, bad syscall) to dmesg. */
49 int show_unhandled_signals = 1;
52 * Dump out the contents of some memory nicely...
/*
 * Hex-dump the range [bottom, top) in 32-byte rows of eight 32-bit
 * words. Each word is fetched with __get_user() so an unmapped address
 * prints "????????" instead of faulting. 'lvl' is the printk level
 * prefix, 'str' a caption for the range header line.
 */
54 static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
62 * We need to switch to kernel mode so that we can use __get_user
63 * to safely read from kernel space. Note that we now dump the
64 * code first, just in case the backtrace kills us.
69 printk("%s%s(0x%016lx to 0x%016lx)\n", lvl, str, bottom, top);
/* Walk 32-byte-aligned rows; the first row may begin below 'bottom'. */
71 for (first = bottom & ~31; first < top; first += 32) {
/* Row buffer: eight " %08x" cells of 9 chars each, plus NUL. */
73 char str[sizeof(" 12345678") * 8 + 1];
75 memset(str, ' ', sizeof(str));
76 str[sizeof(str) - 1] = '\0';
78 for (p = first, i = 0; i < 8 && p < top; i++, p += 4) {
/* Only words inside the requested range are printed. */
79 if (p >= bottom && p < top) {
81 if (__get_user(val, (unsigned int *)p) == 0)
82 sprintf(str + i * 9, " %08x", val);
/* Unreadable word: placeholder keeps the columns aligned. */
84 sprintf(str + i * 9, " ????????");
/* Row label is the low 16 bits of the row's address. */
87 printk("%s%04lx:%s\n", lvl, first & 0xffff, str);
/*
 * Report one backtrace frame (symbol printing elided from this view).
 * If the frame's PC lies in exception-entry text, also hex-dump the
 * pt_regs that the entry code saved at 'stack'.
 */
93 static void dump_backtrace_entry(unsigned long where, unsigned long stack)
96 if (in_exception_text(where))
97 dump_mem("", "Exception stack", stack,
98 stack + sizeof(struct pt_regs));
/*
 * Dump the five 32-bit words around the faulting PC (indices -4..0);
 * the faulting word itself (i == 0) is printed in parentheses.
 */
101 static void dump_instr(const char *lvl, struct pt_regs *regs)
103 unsigned long addr = instruction_pointer(regs);
/* Five "00000000 " cells + "()" around the current word + NUL. */
105 char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
109 * We need to switch to kernel mode so that we can use __get_user
110 * to safely read from kernel space. Note that we now dump the
111 * code first, just in case the backtrace kills us.
116 for (i = -4; i < 1; i++) {
117 unsigned int val, bad;
/* __get_user() returns 0 on success; faults leave 'bad' non-zero. */
119 bad = __get_user(val, &((u32 *)addr)[i]);
122 p += sprintf(p, i == 0 ? "(%08x) " : "%08x ", val);
/* Unreadable word; loop presumably terminates here (break elided). */
124 p += sprintf(p, "bad PC value");
128 printk("%sCode: %s\n", lvl, str);
/*
 * Walk and print a kernel call trace. The starting frame comes from
 * the exception regs when given, from our own frame for the running
 * task, or from the context saved at the task's last schedule-out.
 */
133 static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
135 struct stackframe frame;
/* Bind the live stack pointer so it can seed the unwinder below. */
136 const register unsigned long current_sp asm ("sp");
138 pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
/* regs != NULL: start at the exception frame (x29 is the frame pointer). */
144 frame.fp = regs->regs[29];
/* Unwinding ourselves: start from this very function's frame. */
147 } else if (tsk == current) {
148 frame.fp = (unsigned long)__builtin_frame_address(0);
149 frame.sp = current_sp;
150 frame.pc = (unsigned long)dump_backtrace;
/* Sleeping task: use the fp/sp/pc saved by __switch_to. */
153 * task blocked in __switch_to
155 frame.fp = thread_saved_fp(tsk);
156 frame.sp = thread_saved_sp(tsk);
157 frame.pc = thread_saved_pc(tsk);
160 printk("Call trace:\n");
162 unsigned long where = frame.pc;
/* Step to the caller's frame; loop exit on failure is elided here. */
165 ret = unwind_frame(&frame);
168 dump_backtrace_entry(where, frame.sp);
/* Generic show_stack() hook: backtrace 'tsk' without exception regs. */
172 void show_stack(struct task_struct *tsk, unsigned long *sp)
174 dump_backtrace(NULL, tsk);
/* Oops-banner decorations selected by kernel configuration. */
178 #ifdef CONFIG_PREEMPT
179 #define S_PREEMPT " PREEMPT"
/*
 * Emit the oops report for a fatal exception: banner with a per-boot
 * die counter, die-notifier-chain callout, offending process info,
 * and (for kernel-mode or in-interrupt faults) the raw stack,
 * backtrace and code bytes. Returns the notifier chain's verdict.
 * Called from die() below, which provides the locking.
 */
189 static int __die(const char *str, int err, struct thread_info *thread,
190 struct pt_regs *regs)
192 struct task_struct *tsk = thread->task;
/* Distinguishes successive oopses within one boot: [#1], [#2], ... */
193 static int die_counter;
196 pr_emerg("Internal error: %s: %x [#%d]" S_PREEMPT S_SMP "\n",
197 str, err, ++die_counter);
199 /* trap and error numbers are mostly meaningless on ARM */
200 ret = notify_die(DIE_OOPS, str, regs, err, 0, SIGSEGV);
/* A notifier may claim the event and suppress the rest of the dump. */
201 if (ret == NOTIFY_STOP)
/* thread + 1 == end of thread_info == the kernel stack limit. */
206 pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n",
207 TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), thread + 1);
/* Only dump kernel state when the fault was not a plain user fault. */
209 if (!user_mode(regs) || in_interrupt()) {
210 dump_mem(KERN_EMERG, "Stack: ", regs->sp,
211 THREAD_SIZE + (unsigned long)task_stack_page(tsk))
212 dump_backtrace(regs, tsk);
213 dump_instr(KERN_EMERG, regs);
/* Serializes oops output when several CPUs die at once. */
219 static DEFINE_RAW_SPINLOCK(die_lock);
222 * This function is protected against re-entrancy.
/*
 * Top-level fatal-exception handler: print the oops under die_lock
 * with interrupts off, hand over to a loaded crash kernel if any,
 * taint the kernel, and panic when recovery is impossible.
 */
224 void die(const char *str, struct pt_regs *regs, int err)
226 struct thread_info *thread = current_thread_info();
231 raw_spin_lock_irq(&die_lock);
234 ret = __die(str, err, thread, regs);
/* Jump into the crash (kexec) kernel when one is loaded. */
236 if (regs && kexec_should_crash(thread->task))
/* Post-oops state is unreliable; record that for lockdep too. */
240 add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
241 raw_spin_unlock_irq(&die_lock);
/* Dying in interrupt context cannot be unwound -- panic outright. */
245 panic("Fatal exception in interrupt");
247 panic("Fatal exception");
/* NOTE(review): the terminating do_exit() call is elided from this view. */
248 if (ret != NOTIFY_STOP)
/*
 * Fault-reporting funnel: deliver 'info' as a signal to the current
 * task for user-mode faults; the kernel-mode branch (presumably
 * die(str, regs, err)) is elided from this view.
 */
252 void arm64_notify_die(const char *str, struct pt_regs *regs,
253 struct siginfo *info, int err)
256 force_sig_info(info->si_signo, info, current);
/* Registered handlers for otherwise-undefined instruction encodings. */
261 static LIST_HEAD(undef_hook);
/*
 * Add a hook to the undef list.
 * NOTE(review): no lock is visible around the list mutation here --
 * confirm registration only happens before the handlers can race.
 */
263 void register_undef_hook(struct undef_hook *hook)
265 list_add(&hook->node, &undef_hook);
/*
 * Match 'instr' and the current PSTATE against every registered hook's
 * mask/value pairs and invoke the match. Returns the hook's result
 * (0 == handled) or 1 when no hook matched.
 */
268 static int call_undef_hook(struct pt_regs *regs, unsigned int instr)
270 struct undef_hook *hook;
271 int (*fn)(struct pt_regs *regs, unsigned int instr) = NULL;
273 list_for_each_entry(hook, &undef_hook, node)
274 if ((instr & hook->instr_mask) == hook->instr_val &&
275 (regs->pstate & hook->pstate_mask) == hook->pstate_val)
278 return fn ? fn(regs, instr) : 1;
/*
 * Undefined-instruction exception entry. Fetch the faulting opcode
 * (handling 16/32-bit AArch32 Thumb encodings for compat tasks), give
 * the AArch32 breakpoint handler and the undef hooks a chance to
 * emulate it, and finally raise SIGILL at the faulting PC.
 */
281 asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
285 void __user *pc = (void __user *)instruction_pointer(regs);
287 /* check for AArch32 breakpoint instructions */
288 if (!aarch32_break_handler(regs))
290 if (user_mode(regs)) {
291 if (compat_thumb_mode(regs)) {
/* First halfword; a wide (Thumb-2) encoding needs the second too. */
292 if (get_user(instr, (u16 __user *)pc))
294 if (is_wide_instruction(instr)) {
296 if (get_user(instr2, (u16 __user *)pc+1))
/* AArch64 / AArch32 ARM mode: a single 32-bit fetch. */
301 } else if (get_user(instr, (u32 __user *)pc)) {
/* Kernel-mode fault: the opcode is directly readable. */
306 instr = *((u32 *)pc);
/* 0 == some hook emulated the instruction; nothing more to do. */
309 if (call_undef_hook(regs, instr) == 0)
/* Ratelimited diagnostic for genuinely unhandled user faults. */
313 if (show_unhandled_signals && unhandled_signal(current, SIGILL) &&
314 printk_ratelimit()) {
315 pr_info("%s[%d]: undefined instruction: pc=%p\n",
316 current->comm, task_pid_nr(current), pc);
317 dump_instr(KERN_INFO, regs);
320 info.si_signo = SIGILL;
322 info.si_code = ILL_ILLOPC;
325 arm64_notify_die("Oops - undefined instruction", regs, &info, 0);
/* AArch32 private-syscall dispatcher, defined elsewhere (compat code). */
328 long compat_arm_syscall(struct pt_regs *regs);
/*
 * Fallback for unimplemented syscall numbers: let compat tasks try the
 * ARM private syscalls first, log the attempt (ratelimited), then
 * return sys_ni_syscall()'s -ENOSYS.
 */
330 asmlinkage long do_ni_syscall(struct pt_regs *regs)
334 if (is_compat_task()) {
335 ret = compat_arm_syscall(regs);
341 if (show_unhandled_signals && printk_ratelimit()) {
342 pr_info("%s[%d]: syscall %d\n", current->comm,
343 task_pid_nr(current), (int)regs->syscallno);
344 dump_instr("", regs);
349 return sys_ni_syscall();
352 #ifdef CONFIG_DENVER_CPU
354 * MCA assert register dump
/*
 * Dump NVIDIA Denver Machine Check Architecture state: read the MCA
 * capability register, then print ctrl/status/addr (plus misc1/misc2
 * for bank 6) for banks 10 down to 0 via implementation-defined
 * s3_0_c15_* system registers.
 * NOTE(review): register encodings and bank layout are Denver-specific
 * -- confirm against NVIDIA documentation before changing.
 */
356 void dump_mca_debug(void)
360 unsigned long serri_ctrl, serri_status, serri_addr,
361 serri_misc1, serri_misc2;
363 pr_crit("Machine Check Architecture assert failed:\n");
/* Capability register; presumably selects which dump path runs below. */
365 asm volatile("mrs %0, s3_0_c15_c3_0" : "=r" (cap) : );
/* Bank 10 */
371 asm volatile("mrs %0, s3_0_c15_c11_4" : "=r" (serri_ctrl) : );
372 asm volatile("mrs %0, s3_0_c15_c11_5" : "=r" (serri_status) : );
373 asm volatile("mrs %0, s3_0_c15_c11_6" : "=r" (serri_addr) : );
374 pr_crit("[Bank 10] ctrl:0x%016lx status:0x%016lx addr:0x%016lx\n",
375 serri_ctrl, serri_status, serri_addr);
/* Bank 9 */
377 asm volatile("mrs %0, s3_0_c15_c10_6" : "=r" (serri_ctrl) : );
378 asm volatile("mrs %0, s3_0_c15_c10_7" : "=r" (serri_status) : );
379 asm volatile("mrs %0, s3_0_c15_c11_0" : "=r" (serri_addr) : );
380 pr_crit("[Bank 9] ctrl:0x%016lx status:0x%016lx addr:0x%016lx\n",
381 serri_ctrl, serri_status, serri_addr);
/* Bank 8 */
383 asm volatile("mrs %0, s3_0_c15_c10_0" : "=r" (serri_ctrl) : );
384 asm volatile("mrs %0, s3_0_c15_c10_1" : "=r" (serri_status) : );
385 asm volatile("mrs %0, s3_0_c15_c10_2" : "=r" (serri_addr) : );
386 pr_crit("[Bank 8] ctrl:0x%016lx status:0x%016lx addr:0x%016lx\n",
387 serri_ctrl, serri_status, serri_addr);
/* Bank 7 */
389 asm volatile("mrs %0, s3_0_c15_c9_2" : "=r" (serri_ctrl) : );
390 asm volatile("mrs %0, s3_0_c15_c9_3" : "=r" (serri_status) : );
391 asm volatile("mrs %0, s3_0_c15_c9_4" : "=r" (serri_addr) : );
392 pr_crit("[Bank 7] ctrl:0x%016lx status:0x%016lx addr:0x%016lx\n",
393 serri_ctrl, serri_status, serri_addr);
/* Bank 6 is the only bank that also exposes two misc registers. */
395 asm volatile("mrs %0, s3_0_c15_c8_4" : "=r" (serri_ctrl) : );
396 asm volatile("mrs %0, s3_0_c15_c8_5" : "=r" (serri_status) : );
397 asm volatile("mrs %0, s3_0_c15_c8_6" : "=r" (serri_addr) : );
398 asm volatile("mrs %0, s3_0_c15_c8_7" : "=r" (serri_misc1) : );
399 asm volatile("mrs %0, s3_0_c15_c9_0" : "=r" (serri_misc2) : );
400 pr_crit("[Bank 6] ctrl:0x%016lx status:0x%016lx addr:0x%016lx \
401 misc1:0x%016lx, misc2:0x%016lx\n",
402 serri_ctrl, serri_status, serri_addr, serri_misc1, serri_misc2);
/* Bank 5 */
404 asm volatile("mrs %0, s3_0_c15_c7_6" : "=r" (serri_ctrl) : );
405 asm volatile("mrs %0, s3_0_c15_c7_7" : "=r" (serri_status) : );
406 asm volatile("mrs %0, s3_0_c15_c8_0" : "=r" (serri_addr) : );
407 pr_crit("[Bank 5] ctrl:0x%016lx status:0x%016lx addr:0x%016lx\n",
408 serri_ctrl, serri_status, serri_addr);
/* Bank 4 */
410 asm volatile("mrs %0, s3_0_c15_c7_0" : "=r" (serri_ctrl) : );
411 asm volatile("mrs %0, s3_0_c15_c7_1" : "=r" (serri_status) : );
412 asm volatile("mrs %0, s3_0_c15_c7_2" : "=r" (serri_addr) : );
413 pr_crit("[Bank 4] ctrl:0x%016lx status:0x%016lx addr:0x%016lx\n",
414 serri_ctrl, serri_status, serri_addr);
/* Bank 3 */
416 asm volatile("mrs %0, s3_0_c15_c6_2" : "=r" (serri_ctrl) : );
417 asm volatile("mrs %0, s3_0_c15_c6_3" : "=r" (serri_status) : );
418 asm volatile("mrs %0, s3_0_c15_c6_4" : "=r" (serri_addr) : );
419 pr_crit("[Bank 3] ctrl:0x%016lx status:0x%016lx addr:0x%016lx\n",
420 serri_ctrl, serri_status, serri_addr);
/* Bank 2 */
422 asm volatile("mrs %0, s3_0_c15_c5_4" : "=r" (serri_ctrl) : );
423 asm volatile("mrs %0, s3_0_c15_c5_5" : "=r" (serri_status) : );
424 asm volatile("mrs %0, s3_0_c15_c5_6" : "=r" (serri_addr) : );
425 pr_crit("[Bank 2] ctrl:0x%016lx status:0x%016lx addr:0x%016lx\n",
426 serri_ctrl, serri_status, serri_addr);
/* Bank 1 */
428 asm volatile("mrs %0, s3_0_c15_c4_6" : "=r" (serri_ctrl) : );
429 asm volatile("mrs %0, s3_0_c15_c4_7" : "=r" (serri_status) : );
430 asm volatile("mrs %0, s3_0_c15_c5_0" : "=r" (serri_addr) : );
431 pr_crit("[Bank 1] ctrl:0x%016lx status:0x%016lx addr:0x%016lx\n",
432 serri_ctrl, serri_status, serri_addr);
/* Bank 0 */
434 asm volatile("mrs %0, s3_0_c15_c4_0" : "=r" (serri_ctrl) : );
435 asm volatile("mrs %0, s3_0_c15_c4_1" : "=r" (serri_status) : );
436 asm volatile("mrs %0, s3_0_c15_c4_2" : "=r" (serri_addr) : );
437 pr_crit("[Bank 0] ctrl:0x%016lx status:0x%016lx addr:0x%016lx\n",
438 serri_ctrl, serri_status, serri_addr);
/* Fallback branches when 'cap' indicates no/unexpected banks. */
442 pr_crit("no MCA banks implemented\n");
444 pr_crit("unknown MCA bank configuration\n");
450 #endif /* CONFIG_DENVER_CPU */
453 * bad_mode handles the impossible case in the exception vector.
/*
 * Entered from an exception vector that should never fire. On Denver
 * CPUs, first read the SERR status register to detect a pending MCA
 * assert, dump the MCA banks, and write the status back (presumably to
 * acknowledge/clear it -- confirm against NVIDIA docs). Otherwise
 * report which vector fired and raise SIGILL at the trapped PC.
 */
455 asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
458 unsigned long serr_status;
459 void __user *pc = (void __user *)instruction_pointer(regs);
462 #ifdef CONFIG_DENVER_CPU
463 /* check for MCA assert */
464 asm volatile("mrs %0, s3_0_c15_c3_1" : "=r" (serr_status));
/* Write the status value back to the SERR register. */
468 asm volatile("msr s3_0_c15_c3_1, %0" : : "r" (serr_status));
/* 'reason' indexes the handler[] name table at the top of the file. */
473 pr_crit("Bad mode in %s handler detected, code 0x%08x\n",
474 handler[reason], esr);
477 info.si_signo = SIGILL;
479 info.si_code = ILL_ILLOPC;
482 arm64_notify_die("Oops - bad mode", regs, &info, 0);
/* Report a corrupt page-table entry (PTE) value with its source location. */
485 void __pte_error(const char *file, int line, unsigned long val)
487 printk("%s:%d: bad pte %016lx.\n", file, line, val);
/* Report a corrupt page-middle-directory (PMD) value with its source location. */
490 void __pmd_error(const char *file, int line, unsigned long val)
492 printk("%s:%d: bad pmd %016lx.\n", file, line, val);
/* Report a corrupt page-global-directory (PGD) value with its source location. */
495 void __pgd_error(const char *file, int line, unsigned long val)
497 printk("%s:%d: bad pgd %016lx.\n", file, line, val);
500 void __init trap_init(void)