2 * drivers/misc/tegra-profiler/backtrace.c
4 * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19 #include <linux/uaccess.h>
20 #include <linux/sched.h>
23 #include <linux/tegra_profiler.h>
26 #include "backtrace.h"
27 #include "eh_unwind.h"
28 #include "dwarf_unwind.h"
/*
 * Return the user-mode stack pointer from @regs: compat_sp for a 32-bit
 * (compat) task, otherwise the native user SP.
 * NOTE(review): the extracted text is missing lines here (return type,
 * braces, the non-ARM64 path) — confirm against the full file.
 */
33 quadd_user_stack_pointer(struct pt_regs *regs)
36 if (compat_user_mode(regs))
37 return regs->compat_sp;
40 return user_stack_pointer(regs);
/*
 * Return the user-mode frame pointer from @regs.
 * Thumb code uses r7 as the frame pointer, ARM code uses r11/fp; the
 * compat accessors are used for 32-bit tasks on a 64-bit kernel.
 * NOTE(review): truncated — the declaration of `fp`, braces and the
 * final return are missing from the extracted text.
 */
44 quadd_get_user_frame_pointer(struct pt_regs *regs)
49 if (compat_user_mode(regs))
50 fp = is_thumb_mode(regs) ?
51 regs->compat_usr(7) : regs->compat_usr(11);
55 fp = is_thumb_mode(regs) ? regs->ARM_r7 : regs->ARM_fp;
/*
 * Return the user-mode link register (return address register):
 * compat_lr for 32-bit tasks, x30 (regs[30]) for native 64-bit tasks.
 */
61 quadd_user_link_register(struct pt_regs *regs)
64 return compat_user_mode(regs) ?
65 regs->compat_lr : regs->regs[30];
/*
 * Record the 4-bit unwind-method code @type for backtrace entry @bt_idx
 * in the packed array @p: eight 4-bit codes per u32 word.  The target
 * nibble is cleared first, then OR-ed in, so stale bits cannot survive.
 */
72 put_unw_type(u32 *p, int bt_idx, unsigned int type)
76 word_idx = bt_idx / 8;
77 shift = (bt_idx % 8) * 4;
79 *(p + word_idx) &= ~(0x0f << shift);
80 *(p + word_idx) |= (type & 0x0f) << shift;
/*
 * Append return address @ip to callchain @cc, tagging it with the 4-bit
 * unwind-method @type (see put_unw_type()).
 * Rejects @ip if it is below hrt->low_addr or fails validate_pc_addr()
 * (QUADD_URC_PC_INCORRECT), and refuses to grow past
 * QUADD_MAX_STACK_DEPTH (QUADD_URC_LEVEL_TOO_DEEP).
 * NOTE(review): truncated — the return statements and the selector
 * (presumably cc->cs_64) between the ip_64/ip_32 stores are missing
 * from the extracted text; confirm against the full file.
 */
84 quadd_callchain_store(struct quadd_callchain *cc,
85        unsigned long ip, unsigned int type)
87 unsigned long low_addr = cc->hrt->low_addr;
89 if (ip < low_addr || !validate_pc_addr(ip, sizeof(unsigned long))) {
90 cc->unw_rc = QUADD_URC_PC_INCORRECT;
94 if (cc->nr >= QUADD_MAX_STACK_DEPTH) {
95 cc->unw_rc = QUADD_URC_LEVEL_TOO_DEEP;
99 put_unw_type(cc->types, cc->nr, type);
/* 64-bit vs 32-bit destination buffer; the branch between them is lost. */
102 cc->ip_64[cc->nr++] = ip;
104 cc->ip_32[cc->nr++] = ip;
/*
 * True if an exception-table (unwind) entry covers @addr for @task,
 * checking the DWARF tables first, then the ARM32 EHABI tables.
 * Used by the mixed unwinder to decide when to switch away from
 * frame-pointer walking.
 */
110 is_ex_entry_exist(struct pt_regs *regs,
112       struct task_struct *task)
114 return quadd_is_ex_entry_exist_dwarf(regs, addr, task) ||
115        quadd_is_ex_entry_exist_arm32_ehabi(regs, addr, task);
/*
 * Walk one frame-pointer frame of the user stack (native word size).
 *
 * @tail points at the current frame record inside @stack_vma.  Two
 * layouts are handled:
 *   - gcc-thumb/clang frame: *tail is a stack address (the saved fp),
 *     with the saved lr in the next word (tail + 1);
 *   - arm-gcc frame: *tail is the saved lr, with the saved fp in the
 *     previous word (tail - 1).
 * The saved lr is stored into @cc (QUADD_UNW_TYPE_FP) and curr_fp/
 * curr_sp/curr_pc are updated so another unwinder can resume here.
 *
 * Returns the previous frame pointer to continue from, or (in the
 * missing lines) NULL to stop.  In mixed mode the walk also stops as
 * soon as an exception-table entry exists for the stored lr, handing
 * over to the table-based unwinders.
 *
 * NOTE(review): truncated — early returns, the fp_prev <= tail sanity
 * check, and several braces are missing from the extracted text.
 */
static unsigned long __user *
user_backtrace(struct pt_regs *regs,
120        unsigned long __user *tail,
121        struct quadd_callchain *cc,
122        struct vm_area_struct *stack_vma,
123        struct task_struct *task)
126 unsigned long value, value_lr = 0, value_fp = 0;
127 unsigned long __user *fp_prev = NULL;
/* Frame record must itself lie inside the stack VMA. */
129 if (!is_vma_addr((unsigned long)tail, stack_vma, sizeof(*tail)))
132 if (__copy_from_user_inatomic(&value, tail, sizeof(unsigned long))) {
133 cc->unw_rc = QUADD_URC_EACCESS;
/* *tail pointing back into the stack ⇒ gcc-thumb/clang layout. */
137 if (is_vma_addr(value, stack_vma, sizeof(value))) {
138 /* gcc thumb/clang frame */
141 if (!is_vma_addr((unsigned long)(tail + 1), stack_vma,
145 if (__copy_from_user_inatomic(&value_lr, tail + 1,
147 cc->unw_rc = QUADD_URC_EACCESS;
151 cc->curr_fp = value_fp;
152 cc->curr_sp = (unsigned long)tail + sizeof(value_fp) * 2;
153 cc->curr_pc = value_lr;
/* Otherwise arm-gcc layout: saved fp sits one word below tail. */
156 if (__copy_from_user_inatomic(&value_fp, tail - 1,
158 cc->unw_rc = QUADD_URC_EACCESS;
162 if (!is_vma_addr(value_fp, stack_vma, sizeof(value_fp)))
165 cc->curr_fp = value_fp;
166 cc->curr_sp = (unsigned long)tail + sizeof(value_fp);
167 cc->curr_pc = value_lr = value;
170 fp_prev = (unsigned long __user *)value_fp;
174 nr_added = quadd_callchain_store(cc, value_lr, QUADD_UNW_TYPE_FP);
/* Mixed mode: prefer table-based unwind once a table entry covers lr. */
178 if (cc->unw_method == QUADD_UNW_METHOD_MIXED &&
179     is_ex_entry_exist(regs, value_lr, task))
/*
 * Frame-pointer unwind of a native user-space task, starting from the
 * live registers in @regs.
 *
 * Validates sp/fp (non-zero, fp above sp, word-aligned, inside the
 * stack VMA), probes *fp, then distinguishes the thumb and arm frame
 * layouts before seeding @tail and looping user_backtrace() until the
 * chain breaks or becomes misaligned.  Error causes are reported via
 * cc->unw_rc.
 *
 * NOTE(review): truncated — the mm NULL-check bodies, several early
 * returns/braces, the read_lr bookkeeping around the *(fp+1) load, and
 * the final return are missing from the extracted text; the control
 * flow described above is inferred and should be confirmed against the
 * full file.
 */
186 get_user_callchain_fp(struct pt_regs *regs,
187       struct quadd_callchain *cc,
188       struct task_struct *task)
190 unsigned long fp, sp, pc, reg;
191 struct vm_area_struct *vma, *vma_pc = NULL;
192 unsigned long __user *tail = NULL;
193 struct mm_struct *mm = task->mm;
196 cc->unw_rc = QUADD_URC_FP_INCORRECT;
199 cc->unw_rc = QUADD_URC_FAILURE;
203 sp = quadd_user_stack_pointer(regs);
204 pc = instruction_pointer(regs);
205 fp = quadd_get_user_frame_pointer(regs);
/* fp must be non-zero, above sp, and word-aligned. */
207 if (fp == 0 || fp < sp || fp & 0x3)
210 vma = find_vma(mm, sp);
212 cc->unw_rc = QUADD_URC_SP_INCORRECT;
216 if (!is_vma_addr(fp, vma, sizeof(fp)))
/* Pre-fault/validate the frame word before the inatomic copies below. */
219 if (probe_kernel_address(fp, reg)) {
220 pr_warn_once("%s: failed for address: %#lx\n", __func__, fp);
221 cc->unw_rc = QUADD_URC_EACCESS;
225 pr_debug("sp/fp: %#lx/%#lx, pc/lr: %#lx/%#lx, *fp: %#lx, stack: %#lx-%#lx\n",
226  sp, fp, pc, quadd_user_link_register(regs), reg,
227  vma->vm_start, vma->vm_end);
/*
 * Thumb frames: *fp must point further up the stack.  ARM frames with
 * *fp up-stack take the else-branch; the (missing) bodies decide the
 * frame layout actually used.
 */
229 if (is_thumb_mode(regs)) {
230 if (reg <= fp || !is_vma_addr(reg, vma, sizeof(reg)))
232 } else if (reg > fp && is_vma_addr(reg, vma, sizeof(reg))) {
/* Try to read the saved lr candidate at fp + 1 word. */
237 if (is_vma_addr(fp + sizeof(unsigned long), vma, sizeof(fp))) {
238 if (__copy_from_user_inatomic(
240 (unsigned long __user *)fp + 1,
241 sizeof(unsigned long))) {
242 cc->unw_rc = QUADD_URC_EACCESS;
246 vma_pc = find_vma(mm, pc);
/* No plausible lr in the frame ⇒ short frame: store the live lr. */
250 if (!read_lr || !is_vma_addr(value, vma_pc, sizeof(value))) {
251 /* gcc: fp --> short frame tail (fp) */
253 unsigned long lr = quadd_user_link_register(regs);
255 nr_added = quadd_callchain_store(cc, lr,
260 tail = (unsigned long __user *)reg;
265 tail = (unsigned long __user *)fp;
/* Walk frames until user_backtrace() stops or tail goes misaligned. */
267 while (tail && !((unsigned long)tail & 0x3))
268 tail = user_backtrace(regs, tail, cc, vma, task);
/*
 * Resume a frame-pointer walk from the state another unwinder left in
 * @cc (curr_fp/curr_sp) rather than from live registers — used when
 * switching unwind methods mid-chain.
 * NOTE(review): truncated — the mm/curr_fp guard bodies and returns
 * are missing from the extracted text.
 */
274 __user_backtrace(struct pt_regs *regs,
275  struct quadd_callchain *cc,
276  struct task_struct *task)
278 struct mm_struct *mm = task->mm;
279 struct vm_area_struct *vma;
280 unsigned long __user *tail;
282 cc->unw_rc = QUADD_URC_FP_INCORRECT;
285 cc->unw_rc = QUADD_URC_FAILURE;
/* The saved sp locates the stack VMA the frame chain must stay inside. */
289 vma = find_vma(mm, cc->curr_sp);
291 cc->unw_rc = QUADD_URC_SP_INCORRECT;
295 tail = (unsigned long __user *)cc->curr_fp;
297 while (tail && !((unsigned long)tail & 0x3))
298 tail = user_backtrace(regs, tail, cc, vma, task);
/*
 * 32-bit (compat) counterpart of user_backtrace(): identical frame-walk
 * logic but with u32 stack words and pointers, for AArch32 tasks on an
 * ARM64 kernel.  See user_backtrace() for the two frame layouts.
 * NOTE(review): truncated exactly like the native version — early
 * returns, the fp_prev sanity check and braces are missing from the
 * extracted text.
 */
305 user_backtrace_compat(struct pt_regs *regs,
307       struct quadd_callchain *cc,
308       struct vm_area_struct *stack_vma,
309       struct task_struct *task)
312 u32 value, value_lr = 0, value_fp = 0;
313 u32 __user *fp_prev = NULL;
315 if (!is_vma_addr((unsigned long)tail, stack_vma, sizeof(*tail)))
318 if (__copy_from_user_inatomic(&value, tail, sizeof(value))) {
319 cc->unw_rc = QUADD_URC_EACCESS;
/* *tail pointing back into the stack ⇒ gcc-thumb/clang layout. */
323 if (is_vma_addr(value, stack_vma, sizeof(value))) {
324 /* gcc thumb/clang frame */
327 if (!is_vma_addr((unsigned long)(tail + 1), stack_vma,
331 if (__copy_from_user_inatomic(&value_lr, tail + 1,
333 cc->unw_rc = QUADD_URC_EACCESS;
337 cc->curr_fp = value_fp;
338 cc->curr_sp = (unsigned long)tail + sizeof(value_fp) * 2;
339 cc->curr_pc = value_lr;
/* Otherwise arm-gcc layout: saved fp one 32-bit word below tail. */
342 if (__copy_from_user_inatomic(&value_fp, tail - 1,
344 cc->unw_rc = QUADD_URC_EACCESS;
348 if (!is_vma_addr(value_fp, stack_vma, sizeof(value_fp)))
351 cc->curr_fp = value_fp;
352 cc->curr_sp = (unsigned long)tail + sizeof(value_fp);
353 cc->curr_pc = value_lr = value;
356 fp_prev = (u32 __user *)(unsigned long)value_fp;
360 nr_added = quadd_callchain_store(cc, value_lr, QUADD_UNW_TYPE_FP);
/* Mixed mode: hand over to table-based unwind once lr is covered. */
364 if (cc->unw_method == QUADD_UNW_METHOD_MIXED &&
365     is_ex_entry_exist(regs, value_lr, task))
/*
 * 32-bit (compat) counterpart of get_user_callchain_fp(): frame-pointer
 * unwind of an AArch32 task from live registers, using u32 stack words.
 * Validation, thumb/arm layout detection, short-frame handling and the
 * final user_backtrace_compat() loop mirror the native version.
 * NOTE(review): truncated — the sp/pc/fp declarations, guard bodies,
 * read_lr bookkeeping and final return are missing from the extracted
 * text; confirm against the full file.
 */
372 get_user_callchain_fp_compat(struct pt_regs *regs,
373      struct quadd_callchain *cc,
374      struct task_struct *task)
377 struct vm_area_struct *vma, *vma_pc = NULL;
378 u32 __user *tail = NULL;
379 struct mm_struct *mm = task->mm;
382 cc->unw_rc = QUADD_URC_FP_INCORRECT;
385 cc->unw_rc = QUADD_URC_FAILURE;
389 sp = quadd_user_stack_pointer(regs);
390 pc = instruction_pointer(regs);
391 fp = quadd_get_user_frame_pointer(regs);
/* fp must be non-zero, above sp, and word-aligned. */
393 if (fp == 0 || fp < sp || fp & 0x3)
396 vma = find_vma(mm, sp);
398 cc->unw_rc = QUADD_URC_SP_INCORRECT;
402 if (!is_vma_addr(fp, vma, sizeof(fp)))
/* Pre-fault/validate the frame word before the inatomic copies below. */
405 if (probe_kernel_address((unsigned long)fp, reg)) {
406 pr_warn_once("%s: failed for address: %#x\n", __func__, fp);
407 cc->unw_rc = QUADD_URC_EACCESS;
411 pr_debug("sp/fp: %#x/%#x, pc/lr: %#x/%#x, *fp: %#x, stack: %#lx-%#lx\n",
412  sp, fp, pc, (u32)quadd_user_link_register(regs), reg,
413  vma->vm_start, vma->vm_end);
/* Frame-layout detection, as in the native version (bodies missing). */
415 if (is_thumb_mode(regs)) {
416 if (reg <= fp || !is_vma_addr(reg, vma, sizeof(reg)))
418 } else if (reg > fp && is_vma_addr(reg, vma, sizeof(reg))) {
/* Try to read the saved lr candidate at fp + 4. */
423 if (is_vma_addr(fp + sizeof(u32), vma, sizeof(fp))) {
424 if (__copy_from_user_inatomic(
426 (u32 __user *)(fp + sizeof(u32)),
428 cc->unw_rc = QUADD_URC_EACCESS;
432 vma_pc = find_vma(mm, pc);
/* No plausible lr in the frame ⇒ short frame: store the live lr. */
436 if (!read_lr || !is_vma_addr(value, vma_pc, sizeof(value))) {
437 /* gcc: fp --> short frame tail (fp) */
439 u32 lr = quadd_user_link_register(regs);
441 nr_added = quadd_callchain_store(cc, lr,
446 tail = (u32 __user *)(unsigned long)reg;
451 tail = (u32 __user *)(unsigned long)fp;
/* Walk frames until the chain breaks or tail goes misaligned. */
453 while (tail && !((unsigned long)tail & 0x3))
454 tail = user_backtrace_compat(regs, tail, cc, vma, task);
/*
 * 32-bit (compat) counterpart of __user_backtrace(): resume the
 * frame-pointer walk from the curr_fp/curr_sp saved in @cc.
 * NOTE(review): truncated — the tail declaration, guard bodies and
 * returns are missing from the extracted text.
 */
460 __user_backtrace_compat(struct pt_regs *regs,
461  struct quadd_callchain *cc,
462  struct task_struct *task)
464 struct mm_struct *mm = task->mm;
465 struct vm_area_struct *vma;
468 cc->unw_rc = QUADD_URC_FP_INCORRECT;
471 cc->unw_rc = QUADD_URC_FAILURE;
475 vma = find_vma(mm, cc->curr_sp);
477 cc->unw_rc = QUADD_URC_SP_INCORRECT;
481 tail = (u32 __user *)cc->curr_fp;
483 while (tail && !((unsigned long)tail & 0x3))
484 tail = user_backtrace_compat(regs, tail, cc, vma, task);
489 #endif /* CONFIG_ARM64 */
/*
 * Dispatch a resumed frame-pointer unwind to the compat or native
 * walker, unless the chain already hit the depth limit.
 * NOTE(review): the extracted text fuses two functions here — the
 * second fragment (original lines 513-517, header missing) is the
 * compat/native dispatch for a fresh unwind via
 * get_user_callchain_fp[_compat]().  Confirm boundaries against the
 * full file.
 */
492 __get_user_callchain_fp(struct pt_regs *regs,
493 struct quadd_callchain *cc,
494 struct task_struct *task)
/* Nothing to do if the buffer is already full. */
497 if (cc->unw_rc == QUADD_URC_LEVEL_TOO_DEEP)
501 if (compat_user_mode(regs))
502 __user_backtrace_compat(regs, cc, task);
504 __user_backtrace(regs, cc, task);
506 __user_backtrace(regs, cc, task);
/* --- second (header-missing) function: fresh-unwind dispatch --- */
513 if (compat_user_mode(regs))
514 return get_user_callchain_fp_compat(regs, cc, task);
517 return get_user_callchain_fp(regs, cc, task);
/*
 * Unwind-table-only callchain: alternate DWARF and ARM32 EHABI unwind
 * passes, looping while each iteration still adds entries and the
 * recovered stack pointer keeps moving up (curr_sp > prev_sp) — the
 * monotonic-sp check guards against cycles.
 * NOTE(review): truncated — nr_prev initialisation, loop head and
 * return are missing from the extracted text.
 */
521 get_user_callchain_ut(struct pt_regs *regs,
522       struct quadd_callchain *cc,
523       struct task_struct *task)
526 unsigned long prev_sp;
530 prev_sp = cc->curr_sp;
532 quadd_get_user_cc_dwarf(regs, cc, task);
/* DWARF made no progress ⇒ (missing body) presumably fall back/stop. */
533 if (nr_prev > 0 && cc->nr == nr_prev)
538 quadd_get_user_cc_arm32_ehabi(regs, cc, task);
539 } while (nr_prev != cc->nr &&
540  (cc->nr <= 1 || cc->curr_sp > prev_sp));
/*
 * Mixed-method callchain: per iteration try DWARF, then ARM32 EHABI,
 * then fall back to the frame-pointer walker; keep looping while
 * entries are still being added and the recovered stack pointer keeps
 * moving up (cycle guard, as in get_user_callchain_ut()).
 * NOTE(review): truncated — nr_prev initialisation, loop head, the
 * body of the progress check at original line 560-562, and the return
 * are missing from the extracted text.
 */
546 get_user_callchain_mixed(struct pt_regs *regs,
547  struct quadd_callchain *cc,
548  struct task_struct *task)
551 unsigned long prev_sp;
555 prev_sp = cc->curr_sp;
557 quadd_get_user_cc_dwarf(regs, cc, task);
558 quadd_get_user_cc_arm32_ehabi(regs, cc, task);
/* Table unwinders made progress but sp did not advance ⇒ (missing). */
560 if (nr_prev != cc->nr) {
562     cc->curr_sp <= prev_sp)
/* Fall back to frame-pointer unwinding for the remainder. */
568 __get_user_callchain_fp(regs, cc, task);
569 } while (nr_prev != cc->nr &&
570  (cc->nr <= 1 || cc->curr_sp > prev_sp));
576 quadd_get_user_callchain(struct pt_regs *regs,
577 struct quadd_callchain *cc,
578 struct quadd_ctx *ctx,
579 struct task_struct *task)
581 unsigned int method = cc->unw_method;
586 cc->unw_rc = QUADD_URC_FAILURE;
592 cc->curr_fp_thumb = 0;
596 cc->cs_64 = compat_user_mode(regs) ? 0 : 1;
604 case QUADD_UNW_METHOD_FP:
605 __get_user_callchain_fp(regs, cc, task);
608 case QUADD_UNW_METHOD_EHT:
609 get_user_callchain_ut(regs, cc, task);
612 case QUADD_UNW_METHOD_MIXED:
613 get_user_callchain_mixed(regs, cc, task);
616 case QUADD_UNW_METHOD_NONE: