2 * drivers/misc/tegra-profiler/backtrace.c
4 * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19 #include <linux/uaccess.h>
20 #include <linux/sched.h>
23 #include <linux/tegra_profiler.h>
26 #include "backtrace.h"
27 #include "eh_unwind.h"
28 #include "dwarf_unwind.h"
/*
 * Return true when table-driven unwinding is enabled for this callchain:
 * either ARM EHABI unwind tables (um.ut) or DWARF CFI (um.dwarf).
 * NOTE(review): this excerpt is a sampled listing — the return type,
 * braces, and some lines of each definition are missing from view.
 */
33 is_table_unwinding(struct quadd_callchain *cc)
35 return cc->um.ut || cc->um.dwarf;
/*
 * Read the user-mode stack pointer from the trapped register state.
 * AArch32 tasks running under an arm64 kernel keep SP in compat_sp;
 * otherwise the generic user_stack_pointer() accessor is used.
 */
39 quadd_user_stack_pointer(struct pt_regs *regs)
42 if (compat_user_mode(regs))
43 return regs->compat_sp;
46 return user_stack_pointer(regs);
/*
 * Pick the user frame-pointer register for the current mode:
 * Thumb code conventionally uses r7 as FP, ARM code uses r11 (fp).
 * The compat path reads the AArch32 registers via compat_usr();
 * the native-ARM path (presumably under #else — lines missing from
 * this excerpt) uses ARM_r7/ARM_fp.
 */
50 quadd_get_user_frame_pointer(struct pt_regs *regs)
55 if (compat_user_mode(regs))
56 fp = is_thumb_mode(regs) ?
57 regs->compat_usr(7) : regs->compat_usr(11);
61 fp = is_thumb_mode(regs) ? regs->ARM_r7 : regs->ARM_fp;
/*
 * Return the user link register (the caller's return address):
 * compat_lr for AArch32 tasks, x30 (regs->regs[30]) for native AArch64.
 */
67 quadd_user_link_register(struct pt_regs *regs)
70 return compat_user_mode(regs) ?
71 regs->compat_lr : regs->regs[30];
/*
 * Record the unwind method used to produce backtrace entry @bt_idx.
 * Types are packed as 4-bit nibbles into the u32 array @p, eight
 * nibbles per word: clear the old nibble, then OR in the new type.
 * (Declarations of word_idx/shift are in lines missing from this view.)
 */
78 put_unw_type(u32 *p, int bt_idx, unsigned int type)
82 word_idx = bt_idx / 8;
83 shift = (bt_idx % 8) * 4;
85 *(p + word_idx) &= ~(0x0f << shift);
86 *(p + word_idx) |= (type & 0x0f) << shift;
/*
 * Append instruction pointer @ip, tagged with unwind-method @type, to
 * the callchain.  Rejects addresses below hrt->low_addr or failing
 * validate_pc_addr(), and refuses to grow past QUADD_MAX_STACK_DEPTH.
 * The entry lands in ip_64[] or ip_32[] — presumably selected by the
 * task's bitness (cc->cs_64); the selecting branch is missing from
 * this excerpt.  Error paths set cc->urc_fp with the reason code.
 */
90 quadd_callchain_store(struct quadd_callchain *cc,
91 unsigned long ip, unsigned int type)
93 unsigned long low_addr = cc->hrt->low_addr;
/* Drop obviously bogus PC values before storing them. */
95 if (ip < low_addr || !validate_pc_addr(ip, sizeof(unsigned long))) {
96 cc->urc_fp = QUADD_URC_PC_INCORRECT;
100 if (cc->nr >= QUADD_MAX_STACK_DEPTH) {
101 cc->urc_fp = QUADD_URC_LEVEL_TOO_DEEP;
/* Tag this frame with the unwinder that produced it (FP/EHABI/DWARF). */
105 put_unw_type(cc->types, cc->nr, type);
108 cc->ip_64[cc->nr++] = ip;
110 cc->ip_32[cc->nr++] = ip;
/*
 * True when either the DWARF tables or the ARM32 EHABI tables contain
 * an exception-handling entry covering @addr for @task — i.e. a table
 * unwinder could take over from the frame-pointer walker at this PC.
 * (The @addr parameter line is missing from this excerpt.)
 */
116 is_ex_entry_exist(struct pt_regs *regs,
118 struct task_struct *task)
120 return quadd_is_ex_entry_exist_dwarf(regs, addr, task) ||
121 quadd_is_ex_entry_exist_arm32_ehabi(regs, addr, task);
/*
 * Walk one step of a frame-pointer chain in user memory (native word
 * size).  @tail points at the current frame record on the user stack.
 * All user reads go through __copy_from_user_inatomic(), and every
 * address is bounds-checked against @stack_vma first.  Distinguishes
 * two frame layouts: a "gcc thumb/clang" record where *tail is a new
 * frame pointer and *(tail+1) is LR, versus a layout where *(tail-1)
 * holds FP and *tail is the return address.  Updates cc->curr_fp/
 * curr_sp/curr_pc, stores the recovered LR into the callchain, and
 * returns the next frame pointer; error paths set cc->urc_fp
 * (returns on error/termination are in lines missing from this view).
 */
124 static unsigned long __user *
125 user_backtrace(struct pt_regs *regs,
126 unsigned long __user *tail,
127 struct quadd_callchain *cc,
128 struct vm_area_struct *stack_vma,
129 struct task_struct *task)
132 unsigned long value, value_lr = 0, value_fp = 0;
133 unsigned long __user *fp_prev = NULL;
/* Frame record must lie inside the stack VMA. */
135 if (!is_vma_addr((unsigned long)tail, stack_vma, sizeof(*tail)))
138 if (__copy_from_user_inatomic(&value, tail, sizeof(unsigned long))) {
139 cc->urc_fp = QUADD_URC_EACCESS;
/* *tail points back into the stack => treat it as a saved FP. */
143 if (is_vma_addr(value, stack_vma, sizeof(value))) {
144 /* gcc thumb/clang frame */
147 if (!is_vma_addr((unsigned long)(tail + 1), stack_vma,
/* LR is stored in the slot just above the saved FP. */
151 if (__copy_from_user_inatomic(&value_lr, tail + 1,
153 cc->urc_fp = QUADD_URC_EACCESS;
157 cc->curr_fp = value_fp;
158 cc->curr_sp = (unsigned long)tail + sizeof(value_fp) * 2;
159 cc->curr_pc = value_lr;
/* Other layout: FP is the slot below @tail, *tail is the LR. */
162 if (__copy_from_user_inatomic(&value_fp, tail - 1,
164 cc->urc_fp = QUADD_URC_EACCESS;
168 if (!is_vma_addr(value_fp, stack_vma, sizeof(value_fp)))
171 cc->curr_fp = value_fp;
172 cc->curr_sp = (unsigned long)tail + sizeof(value_fp);
173 cc->curr_pc = value_lr = value;
176 fp_prev = (unsigned long __user *)value_fp;
180 nr_added = quadd_callchain_store(cc, value_lr, QUADD_UNW_TYPE_FP);
/* If a table unwinder can handle this return address, hand over. */
184 if (is_table_unwinding(cc) &&
185 is_ex_entry_exist(regs, value_lr, task))
/*
 * Capture a user callchain by frame-pointer walking (native word
 * size).  Validates SP/FP against the task's VMAs, probes the first
 * frame with probe_kernel_address(), heuristically detects the frame
 * layout (Thumb vs ARM; short "frame tail" vs full record), then
 * iterates user_backtrace() while the tail stays non-NULL and
 * word-aligned.  Sets cc->urc_fp with a failure reason on each error
 * path; the early returns themselves are in lines missing from this
 * sampled excerpt.
 */
192 get_user_callchain_fp(struct pt_regs *regs,
193 struct quadd_callchain *cc,
194 struct task_struct *task)
196 unsigned long fp, sp, pc, reg;
197 struct vm_area_struct *vma, *vma_pc = NULL;
198 unsigned long __user *tail = NULL;
199 struct mm_struct *mm = task->mm;
202 cc->urc_fp = QUADD_URC_FP_INCORRECT;
205 cc->urc_fp = QUADD_URC_FAILURE;
209 sp = quadd_user_stack_pointer(regs);
210 pc = instruction_pointer(regs);
211 fp = quadd_get_user_frame_pointer(regs);
/* FP must be non-zero, above SP, and word-aligned. */
213 if (fp == 0 || fp < sp || fp & 0x3)
216 vma = find_vma(mm, sp);
218 cc->urc_fp = QUADD_URC_SP_INCORRECT;
222 if (!is_vma_addr(fp, vma, sizeof(fp)))
/* Probe the first frame word; bail out if it faults. */
225 if (probe_kernel_address(fp, reg)) {
226 pr_warn_once("%s: failed for address: %#lx\n", __func__, fp);
227 cc->urc_fp = QUADD_URC_EACCESS;
231 pr_debug("sp/fp: %#lx/%#lx, pc/lr: %#lx/%#lx, *fp: %#lx, stack: %#lx-%#lx\n",
232 sp, fp, pc, quadd_user_link_register(regs), reg,
233 vma->vm_start, vma->vm_end);
/* Heuristic: decide whether *fp is a saved FP (stack address). */
235 if (is_thumb_mode(regs)) {
236 if (reg <= fp || !is_vma_addr(reg, vma, sizeof(reg)))
238 } else if (reg > fp && is_vma_addr(reg, vma, sizeof(reg))) {
/* Try to read the slot above fp — candidate saved LR. */
243 if (is_vma_addr(fp + sizeof(unsigned long), vma, sizeof(fp))) {
244 if (__copy_from_user_inatomic(
246 (unsigned long __user *)fp + 1,
247 sizeof(unsigned long))) {
248 cc->urc_fp = QUADD_URC_EACCESS;
252 vma_pc = find_vma(mm, pc);
/* No plausible LR in code VMA => short frame: store real LR. */
256 if (!read_lr || !is_vma_addr(value, vma_pc, sizeof(value))) {
257 /* gcc: fp --> short frame tail (fp) */
259 unsigned long lr = quadd_user_link_register(regs);
261 nr_added = quadd_callchain_store(cc, lr,
266 tail = (unsigned long __user *)reg;
271 tail = (unsigned long __user *)fp;
/* Walk frames while the tail pointer stays sane and aligned. */
273 while (tail && !((unsigned long)tail & 0x3))
274 tail = user_backtrace(regs, tail, cc, vma, task);
/*
 * Resume a frame-pointer walk from a previously saved position
 * (cc->curr_fp / cc->curr_sp) rather than from the live registers —
 * used when switching back from a table unwinder to the FP walker.
 * Error paths set cc->urc_fp; the corresponding returns are in lines
 * missing from this excerpt.
 */
280 __user_backtrace(struct pt_regs *regs,
281 struct quadd_callchain *cc,
282 struct task_struct *task)
284 struct mm_struct *mm = task->mm;
285 struct vm_area_struct *vma;
286 unsigned long __user *tail;
288 cc->urc_fp = QUADD_URC_FP_INCORRECT;
291 cc->urc_fp = QUADD_URC_FAILURE;
/* The saved SP must still map to a valid stack VMA. */
295 vma = find_vma(mm, cc->curr_sp);
297 cc->urc_fp = QUADD_URC_SP_INCORRECT;
301 tail = (unsigned long __user *)cc->curr_fp;
303 while (tail && !((unsigned long)tail & 0x3))
304 tail = user_backtrace(regs, tail, cc, vma, task);
/*
 * Compat (AArch32, u32 word) counterpart of user_backtrace(): walk one
 * step of a 32-bit frame-pointer chain in user memory.  Mirrors the
 * native version's two frame layouts and error handling exactly, but
 * with 4-byte stack slots.  (The @tail parameter line is missing from
 * this excerpt.)
 */
311 user_backtrace_compat(struct pt_regs *regs,
313 struct quadd_callchain *cc,
314 struct vm_area_struct *stack_vma,
315 struct task_struct *task)
318 u32 value, value_lr = 0, value_fp = 0;
319 u32 __user *fp_prev = NULL;
321 if (!is_vma_addr((unsigned long)tail, stack_vma, sizeof(*tail)))
324 if (__copy_from_user_inatomic(&value, tail, sizeof(value))) {
325 cc->urc_fp = QUADD_URC_EACCESS;
/* *tail points back into the stack => treat it as a saved FP. */
329 if (is_vma_addr(value, stack_vma, sizeof(value))) {
330 /* gcc thumb/clang frame */
333 if (!is_vma_addr((unsigned long)(tail + 1), stack_vma,
337 if (__copy_from_user_inatomic(&value_lr, tail + 1,
339 cc->urc_fp = QUADD_URC_EACCESS;
343 cc->curr_fp = value_fp;
344 cc->curr_sp = (unsigned long)tail + sizeof(value_fp) * 2;
345 cc->curr_pc = value_lr;
/* Other layout: FP below @tail, *tail is the return address. */
348 if (__copy_from_user_inatomic(&value_fp, tail - 1,
350 cc->urc_fp = QUADD_URC_EACCESS;
354 if (!is_vma_addr(value_fp, stack_vma, sizeof(value_fp)))
357 cc->curr_fp = value_fp;
358 cc->curr_sp = (unsigned long)tail + sizeof(value_fp);
359 cc->curr_pc = value_lr = value;
362 fp_prev = (u32 __user *)(unsigned long)value_fp;
366 nr_added = quadd_callchain_store(cc, value_lr, QUADD_UNW_TYPE_FP);
/* Hand over to a table unwinder when one covers this address. */
370 if (is_table_unwinding(cc) &&
371 is_ex_entry_exist(regs, value_lr, task))
/*
 * Compat (AArch32) counterpart of get_user_callchain_fp(): capture a
 * user callchain by walking 32-bit frame records.  Same structure as
 * the native version — validate SP/FP, probe the first frame, detect
 * the frame layout, then iterate user_backtrace_compat().  Local
 * declarations of sp/pc/fp/reg (u32) are in lines missing from this
 * sampled excerpt.
 */
378 get_user_callchain_fp_compat(struct pt_regs *regs,
379 struct quadd_callchain *cc,
380 struct task_struct *task)
383 struct vm_area_struct *vma, *vma_pc = NULL;
384 u32 __user *tail = NULL;
385 struct mm_struct *mm = task->mm;
388 cc->urc_fp = QUADD_URC_FP_INCORRECT;
391 cc->urc_fp = QUADD_URC_FAILURE;
395 sp = quadd_user_stack_pointer(regs);
396 pc = instruction_pointer(regs);
397 fp = quadd_get_user_frame_pointer(regs);
/* FP must be non-zero, above SP, and 4-byte aligned. */
399 if (fp == 0 || fp < sp || fp & 0x3)
402 vma = find_vma(mm, sp);
404 cc->urc_fp = QUADD_URC_SP_INCORRECT;
408 if (!is_vma_addr(fp, vma, sizeof(fp)))
411 if (probe_kernel_address((unsigned long)fp, reg)) {
412 pr_warn_once("%s: failed for address: %#x\n", __func__, fp);
413 cc->urc_fp = QUADD_URC_EACCESS;
417 pr_debug("sp/fp: %#x/%#x, pc/lr: %#x/%#x, *fp: %#x, stack: %#lx-%#lx\n",
418 sp, fp, pc, (u32)quadd_user_link_register(regs), reg,
419 vma->vm_start, vma->vm_end);
/* Heuristic: decide whether *fp is a saved FP (stack address). */
421 if (is_thumb_mode(regs)) {
422 if (reg <= fp || !is_vma_addr(reg, vma, sizeof(reg)))
424 } else if (reg > fp && is_vma_addr(reg, vma, sizeof(reg))) {
/* Try to read the slot above fp — candidate saved LR. */
429 if (is_vma_addr(fp + sizeof(u32), vma, sizeof(fp))) {
430 if (__copy_from_user_inatomic(
432 (u32 __user *)(fp + sizeof(u32)),
434 cc->urc_fp = QUADD_URC_EACCESS;
438 vma_pc = find_vma(mm, pc);
/* No plausible LR in code VMA => short frame: store real LR. */
442 if (!read_lr || !is_vma_addr(value, vma_pc, sizeof(value))) {
443 /* gcc: fp --> short frame tail (fp) */
445 u32 lr = quadd_user_link_register(regs);
447 nr_added = quadd_callchain_store(cc, lr,
452 tail = (u32 __user *)(unsigned long)reg;
457 tail = (u32 __user *)(unsigned long)fp;
459 while (tail && !((unsigned long)tail & 0x3))
460 tail = user_backtrace_compat(regs, tail, cc, vma, task);
/*
 * Compat counterpart of __user_backtrace(): resume a 32-bit FP walk
 * from the saved cc->curr_fp / cc->curr_sp position.  The local
 * declaration of @tail (u32 __user *) is in a line missing from this
 * excerpt.
 */
466 __user_backtrace_compat(struct pt_regs *regs,
467 struct quadd_callchain *cc,
468 struct task_struct *task)
470 struct mm_struct *mm = task->mm;
471 struct vm_area_struct *vma;
474 cc->urc_fp = QUADD_URC_FP_INCORRECT;
477 cc->urc_fp = QUADD_URC_FAILURE;
/* The saved SP must still map to a valid stack VMA. */
481 vma = find_vma(mm, cc->curr_sp);
483 cc->urc_fp = QUADD_URC_SP_INCORRECT;
487 tail = (u32 __user *)cc->curr_fp;
489 while (tail && !((unsigned long)tail & 0x3))
490 tail = user_backtrace_compat(regs, tail, cc, vma, task);
495 #endif /* CONFIG_ARM64 */
/*
 * Dispatch the FP unwind (fresh start or resume) to the compat or
 * native implementation depending on the task's execution mode.
 * The compat branches are presumably guarded by #ifdef CONFIG_ARM64
 * (preprocessor lines are missing from this sampled excerpt); the
 * trailing lines look like the fresh-start dispatch to
 * get_user_callchain_fp{,_compat}().  Gives up early if a previous
 * pass already hit the depth limit.
 */
498 __get_user_callchain_fp(struct pt_regs *regs,
499 struct quadd_callchain *cc,
500 struct task_struct *task)
/* Nothing more to gain once the depth limit was reached. */
503 if (cc->urc_fp == QUADD_URC_LEVEL_TOO_DEEP)
507 if (compat_user_mode(regs))
508 __user_backtrace_compat(regs, cc, task);
510 __user_backtrace(regs, cc, task);
512 __user_backtrace(regs, cc, task);
519 if (compat_user_mode(regs))
520 return get_user_callchain_fp_compat(regs, cc, task);
523 return get_user_callchain_fp(regs, cc, task);
/*
 * Mixed-mode unwinding: alternate the table unwinders (DWARF, then
 * ARM32 EHABI — each gated on cc->um, gating lines missing from this
 * excerpt) with the frame-pointer walker.  Loop continues only while
 * progress is made (cc->nr grew) and the stack pointer moved upward
 * (cc->curr_sp > prev_sp), which prevents infinite loops on corrupt
 * or cyclic frame chains.
 */
527 get_user_callchain_mixed(struct pt_regs *regs,
528 struct quadd_callchain *cc,
529 struct task_struct *task)
532 unsigned long prev_sp;
533 struct quadd_unw_methods *um = &cc->um;
/* Remember where this pass started so progress can be checked. */
537 prev_sp = cc->curr_sp;
540 quadd_get_user_cc_dwarf(regs, cc, task);
542 quadd_get_user_cc_arm32_ehabi(regs, cc, task);
544 if (nr_prev != cc->nr) {
546 cc->curr_sp <= prev_sp)
/* Fall back to (or interleave with) the FP walker. */
553 __get_user_callchain_fp(regs, cc, task);
554 } while (nr_prev != cc->nr &&
555 (cc->nr <= 1 || cc->curr_sp > prev_sp));
/*
 * Public entry point: build a user-space callchain for @task from the
 * trapped registers.  Initializes all per-method result codes
 * (urc_fp/urc_ut/urc_dwarf), records the task's bitness in cc->cs_64,
 * and runs the mixed unwinder.  NOTE(review): this definition runs
 * past the end of the visible excerpt (return value and reset of
 * cc->nr etc. are not shown).
 */
561 quadd_get_user_callchain(struct pt_regs *regs,
562 struct quadd_callchain *cc,
563 struct quadd_ctx *ctx,
564 struct task_struct *task)
/* Default everything to FAILURE; cleared below once unwinding starts. */
569 cc->urc_fp = QUADD_URC_FAILURE;
570 cc->urc_ut = QUADD_URC_FAILURE;
571 cc->urc_dwarf = QUADD_URC_FAILURE;
577 cc->curr_fp_thumb = 0;
/* 64-bit callchain unless the task is in AArch32 compat mode. */
581 cc->cs_64 = compat_user_mode(regs) ? 0 : 1;
586 cc->urc_fp = QUADD_URC_NONE;
587 cc->urc_ut = QUADD_URC_NONE;
588 cc->urc_dwarf = QUADD_URC_NONE;
590 get_user_callchain_mixed(regs, cc, task);