2 * drivers/misc/tegra-profiler/dwarf_unwind.c
4 * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 #include <linux/sched.h>
21 #include <linux/slab.h>
22 #include <linux/uaccess.h>
23 #include <linux/err.h>
25 #include <asm/unaligned.h>
27 #include <linux/tegra_profiler.h>
30 #include "backtrace.h"
31 #include "eh_unwind.h"
33 #include "dwarf_unwind.h"
36 DW_WHERE_UNDEF, /* register isn't saved at all */
37 DW_WHERE_SAME, /* register has same value as in prev. frame */
38 DW_WHERE_CFAREL, /* register saved at CFA-relative address */
39 DW_WHERE_REG, /* register saved in another register */
40 DW_WHERE_EXPR, /* register saved */
41 DW_WHERE_VAL_OFFSET, /* value offset */
42 DW_WHERE_VAL_EXPR, /* register has computed value */
45 #define QUADD_AARCH64_REGISTERS 32
46 #define QUADD_AARCH32_REGISTERS 16
48 #define QUADD_NUM_REGS QUADD_AARCH64_REGISTERS
73 const unsigned char *exp;
87 struct dw_eh_frame_hdr {
88 unsigned char version;
89 unsigned char eh_frame_ptr_enc;
90 unsigned char fde_count_enc;
91 unsigned char table_enc;
100 struct reg_info reg[QUADD_NUM_REGS];
105 unsigned char *cfa_expr;
106 unsigned int cfa_expr_len;
111 #define DW_MAX_RS_STACK_DEPTH 8
113 struct dwarf_cpu_context {
114 struct regs_state rs_stack[DW_MAX_RS_STACK_DEPTH];
120 struct quadd_dwarf_context {
121 struct dwarf_cpu_context __percpu *cpu_ctx;
127 unsigned long vregs[QUADD_NUM_REGS];
129 struct regs_state rs;
130 struct regs_state rs_initial;
138 unsigned long offset;
139 unsigned long length;
141 unsigned char *aug_string;
142 unsigned long aug_size;
144 unsigned char fde_encoding;
145 unsigned char lsda_encoding;
147 unsigned long code_align_factor;
148 long data_align_factor;
150 unsigned int initial_insn_len;
151 unsigned char *initial_insn;
154 unsigned int retaddr_reg;
161 unsigned long offset;
162 unsigned long length;
164 unsigned long cie_pointer;
167 unsigned long initial_location;
168 unsigned long address_range;
170 unsigned int insn_length;
171 unsigned char *instructions;
182 #define read_user_data(addr, retval) \
186 pagefault_disable(); \
187 ret = __get_user(retval, addr); \
188 pagefault_enable(); \
191 pr_debug("%s: failed for address: %p\n", \
193 ret = -QUADD_URC_EACCESS; \
199 static struct quadd_dwarf_context ctx;
201 static inline int regnum_sp(int mode)
203 return (mode == DW_MODE_ARM32) ?
207 static inline int regnum_fp(int mode)
209 return (mode == DW_MODE_ARM32) ?
213 static inline int regnum_lr(int mode)
215 return (mode == DW_MODE_ARM32) ?
219 static inline unsigned long
220 get_user_reg_size(int mode)
222 return (mode == DW_MODE_ARM32) ?
223 sizeof(u32) : sizeof(u64);
227 get_secid_frame(int is_eh)
230 QUADD_SEC_TYPE_EH_FRAME :
231 QUADD_SEC_TYPE_DEBUG_FRAME;
235 get_secid_frame_hdr(int is_eh)
238 QUADD_SEC_TYPE_EH_FRAME_HDR :
239 QUADD_SEC_TYPE_DEBUG_FRAME_HDR;
243 is_frame_present(struct ex_region_info *ri, int is_eh)
245 struct extab_info *ti, *ti_hdr;
247 ti = &ri->ex_sec[get_secid_frame(is_eh)];
248 ti_hdr = &ri->ex_sec[get_secid_frame_hdr(is_eh)];
250 return (ti->length && ti_hdr->length) ? 1 : 0;
254 validate_addr(struct ex_region_info *ri,
256 unsigned long nbytes,
259 struct extab_info *ti;
260 struct quadd_mmap_area *mmap;
261 unsigned long start, end;
265 ti = &ri->ex_sec[st];
267 start = (unsigned long)mmap->data + ti->mmap_offset;
268 end = start + ti->length;
270 if (unlikely(addr < start || addr > end - nbytes)) {
271 pr_err_once("%s: error: addr: %#lx, len: %ld, data: %#lx-%#lx\n",
272 __func__, addr, nbytes, start, end);
280 read_mmap_data_u8(struct ex_region_info *ri,
281 const u8 *addr, int st, long *err)
283 unsigned long a = (unsigned long)addr;
285 if (unlikely(!validate_addr(ri, a, sizeof(*addr), st))) {
286 *err = -QUADD_URC_EACCESS;
295 read_mmap_data_u16(struct ex_region_info *ri,
296 const u16 *addr, int st, long *err)
298 unsigned long a = (unsigned long)addr;
300 if (unlikely(!validate_addr(ri, a, sizeof(*addr), st))) {
301 *err = -QUADD_URC_EACCESS;
307 return get_unaligned(addr);
311 read_mmap_data_s16(struct ex_region_info *ri,
312 const s16 *addr, int st, long *err)
314 unsigned long a = (unsigned long)addr;
316 if (unlikely(!validate_addr(ri, a, sizeof(*addr), st))) {
317 *err = -QUADD_URC_EACCESS;
323 return get_unaligned(addr);
327 read_mmap_data_u32(struct ex_region_info *ri,
328 const u32 *addr, int st, long *err)
330 unsigned long a = (unsigned long)addr;
332 if (unlikely(!validate_addr(ri, a, sizeof(*addr), st))) {
333 *err = -QUADD_URC_EACCESS;
339 return get_unaligned(addr);
343 read_mmap_data_s32(struct ex_region_info *ri,
344 const s32 *addr, int st, long *err)
346 unsigned long a = (unsigned long)addr;
348 if (unlikely(!validate_addr(ri, a, sizeof(*addr), st))) {
349 *err = -QUADD_URC_EACCESS;
355 return get_unaligned(addr);
359 read_mmap_data_s64(struct ex_region_info *ri,
360 const s64 *addr, int st, long *err)
362 unsigned long a = (unsigned long)addr;
364 if (unlikely(!validate_addr(ri, a, sizeof(*addr), st))) {
365 *err = -QUADD_URC_EACCESS;
371 return get_unaligned(addr);
375 read_mmap_data_u64(struct ex_region_info *ri,
376 const u64 *addr, int st, long *err)
378 unsigned long a = (unsigned long)addr;
380 if (unlikely(!validate_addr(ri, a, sizeof(*addr), st))) {
381 *err = -QUADD_URC_EACCESS;
387 return get_unaligned(addr);
390 static inline unsigned long
391 ex_addr_to_mmap_addr(unsigned long addr,
392 struct ex_region_info *ri, int st)
394 unsigned long offset;
395 struct extab_info *ti;
397 ti = &ri->ex_sec[st];
398 offset = addr - ti->addr;
400 return ti->mmap_offset + offset + (unsigned long)ri->mmap->data;
403 static inline unsigned long
404 mmap_addr_to_ex_addr(unsigned long addr,
405 struct ex_region_info *ri, int st)
407 unsigned long offset;
408 struct extab_info *ti;
410 ti = &ri->ex_sec[st];
411 offset = addr - ti->mmap_offset - (unsigned long)ri->mmap->data;
413 return ti->addr + offset;
416 static inline int validate_regnum(struct regs_state *rs, int regnum)
418 if (unlikely(regnum >= ARRAY_SIZE(rs->reg))) {
419 pr_err_once("error: invalid reg: %d\n", regnum);
427 set_rule_offset(struct regs_state *rs, int regnum, int where, long offset)
431 if (!validate_regnum(rs, regnum))
434 r = &rs->reg[regnum];
437 r->loc.offset = offset;
441 set_rule_reg(struct regs_state *rs, int regnum, int where, unsigned long reg)
445 if (!validate_regnum(rs, regnum))
448 r = &rs->reg[regnum];
455 set_rule_exp(struct regs_state *rs, int regnum,
456 int where, const unsigned char *exp)
460 if (!validate_regnum(rs, regnum))
463 r = &rs->reg[regnum];
/*
 * set_rule() - generic rule setter; the value is carried in the offset slot
 * (rules such as DW_WHERE_UNDEF/DW_WHERE_SAME pass 0).
 */
static void
set_rule(struct regs_state *rs, int regnum, int where, long value)
{
	set_rule_offset(rs, regnum, where, value);
}
475 static inline unsigned long
476 dw_bst_get_initial_loc(const struct dw_fde_table *fi,
477 unsigned long data_base)
479 return data_base + fi->initial_loc;
482 static inline unsigned long
483 dw_bst_get_fde_addr(const struct dw_fde_table *fi,
484 unsigned long data_base)
486 return data_base + fi->fde;
489 static inline unsigned long
490 dwarf_read_uleb128(struct ex_region_info *ri,
496 unsigned long result;
505 byte = read_mmap_data_u8(ri, addr, st, err);
512 result |= (byte & 0x7f) << shift;
524 static inline unsigned long
525 dwarf_read_sleb128(struct ex_region_info *ri,
541 byte = read_mmap_data_u8(ri, addr, st, err);
546 result |= (byte & 0x7f) << shift;
554 num_bits = 8 * sizeof(result);
556 if ((shift < num_bits) && (byte & 0x40))
557 result |= (-1 << shift);
/*
 * dw_cfa_opcode() - primary opcode of a CFA instruction byte.
 * Per the DWARF spec the primary opcode lives in the top two bits
 * (DW_CFA_advance_loc = 0x40, DW_CFA_offset = 0x80, DW_CFA_restore = 0xc0).
 * NOTE(review): body was lost in extraction; reconstructed from the DWARF
 * call-frame encoding and the switch on dw_cfa_opcode() in this file.
 */
static inline unsigned int
dw_cfa_opcode(unsigned int insn)
{
	return insn & 0xc0;
}
/*
 * dw_cfa_operand() - embedded operand of a CFA instruction byte.
 * Per the DWARF spec the low six bits carry the operand (delta or regnum)
 * for the three primary opcodes.
 * NOTE(review): body was lost in extraction; reconstructed from the DWARF
 * call-frame encoding and the dw_cfa_operand() uses in this file.
 */
static inline unsigned int
dw_cfa_operand(unsigned int insn)
{
	return insn & 0x3f;
}
577 dwarf_read_encoded_value(struct ex_region_info *ri,
584 int dw_ptr_size, count = 0;
585 long stmp = 0, err = 0;
586 unsigned long utmp, res = 0;
587 struct dwarf_cpu_context *cpu_ctx = this_cpu_ptr(ctx.cpu_ctx);
589 pr_debug("encoding: %#x\n", encoding);
591 dw_ptr_size = cpu_ctx->dw_ptr_size;
593 if (encoding == DW_EH_PE_omit) {
594 pr_debug("DW_EH_PE_omit\n");
598 } else if (encoding == DW_EH_PE_aligned) {
599 unsigned long aligned = ALIGN((unsigned long)addr,
602 pr_debug("DW_EH_PE_aligned\n");
604 if (dw_ptr_size == 4) {
605 *val = read_mmap_data_u32(ri, (u32 *)aligned, st, &err);
606 } else if (dw_ptr_size == 8) {
607 *val = read_mmap_data_u64(ri, (u64 *)aligned, st, &err);
609 pr_err_once("%s: error: encoding\n", __func__);
610 return -QUADD_URC_TBL_IS_CORRUPT;
619 switch (encoding & 0x0f) {
620 case DW_EH_PE_absptr:
621 pr_debug("%s: absptr encoding\n", __func__);
623 if (dw_ptr_size == 4) {
624 *val = read_mmap_data_u32(ri, (u32 *)addr, st, &err);
625 } else if (dw_ptr_size == 8) {
626 *val = read_mmap_data_u64(ri, (u64 *)addr, st, &err);
628 pr_err_once("error: wrong dwarf size\n");
629 return -QUADD_URC_UNHANDLED_INSTRUCTION;
637 case DW_EH_PE_sdata2:
638 case DW_EH_PE_udata2:
639 pr_debug("encoding: DW_EH_PE_sdata2\n");
640 stmp = read_mmap_data_s16(ri, (s16 *)addr, st, &err);
644 count += sizeof(s16);
647 case DW_EH_PE_sdata4:
648 case DW_EH_PE_udata4:
649 pr_debug("encoding: DW_EH_PE_udata4/sdata4\n");
650 stmp = read_mmap_data_s32(ri, (s32 *)addr, st, &err);
654 count += sizeof(s32);
657 case DW_EH_PE_sdata8:
658 case DW_EH_PE_udata8:
659 pr_debug("encoding: DW_EH_PE_udata8\n");
660 stmp = read_mmap_data_s64(ri, (s64 *)addr, st, &err);
664 count += sizeof(s64);
667 case DW_EH_PE_uleb128:
668 pr_debug("encoding: DW_EH_PE_uleb128\n");
669 count += dwarf_read_uleb128(ri, addr, &utmp, st, &err);
676 case DW_EH_PE_sleb128:
677 pr_debug("encoding: DW_EH_PE_sleb128\n");
678 count += dwarf_read_sleb128(ri, addr, &stmp, st, &err);
685 pr_warn_once("%s: warning: encoding: %#x\n",
686 __func__, encoding & 0x0f);
687 return -QUADD_URC_UNHANDLED_INSTRUCTION;
690 switch (encoding & 0x70) {
691 case DW_EH_PE_absptr:
692 pr_debug("DW_EH_PE_absptr\n");
697 pr_debug("DW_EH_PE_pcrel, pcrel_base: %p, stmp: %ld\n",
699 res = (unsigned long)pcrel_base + stmp;
702 case DW_EH_PE_textrel:
703 pr_warn_once("warning: DW_EH_PE_textrel\n");
704 return -QUADD_URC_UNHANDLED_INSTRUCTION;
706 case DW_EH_PE_datarel:
707 pr_warn_once("warning: DW_EH_PE_datarel\n");
708 return -QUADD_URC_UNHANDLED_INSTRUCTION;
710 case DW_EH_PE_funcrel:
711 pr_warn_once("warning: DW_EH_PE_funcrel\n");
712 return -QUADD_URC_UNHANDLED_INSTRUCTION;
715 pr_warn_once("%s: warning: encoding: %#x\n",
716 __func__, encoding & 0x70);
717 return -QUADD_URC_UNHANDLED_INSTRUCTION;
721 if (encoding & DW_EH_PE_indirect) {
722 pr_debug("DW_EH_PE_indirect\n");
724 if (dw_ptr_size == 4) {
725 res = read_mmap_data_u32(ri, (u32 *)res,
727 } else if (dw_ptr_size == 8) {
728 res = read_mmap_data_u64(ri, (u64 *)res,
731 pr_err_once("error: wrong dwarf size\n");
732 return -QUADD_URC_UNHANDLED_INSTRUCTION;
735 /* we ignore links to unloaded sections */
747 dwarf_cfa_exec_insns(struct ex_region_info *ri,
748 unsigned char *insn_start,
749 unsigned char *insn_end,
751 struct stackframe *sf,
756 unsigned char *c_insn;
757 unsigned int expr_len, delta, secid;
758 unsigned long utmp, reg;
759 long offset, stmp, err = 0;
760 struct regs_state *rs, *rs_initial, *rs_stack;
761 struct dwarf_cpu_context *cpu_ctx = this_cpu_ptr(ctx.cpu_ctx);
763 secid = get_secid_frame(is_eh);
766 rs_initial = &sf->rs_initial;
768 rs_stack = cpu_ctx->rs_stack;
773 while (c_insn < insn_end && sf->pc <= pc) {
774 insn = read_mmap_data_u8(ri, c_insn++,
779 switch (dw_cfa_opcode(insn)) {
780 case DW_CFA_advance_loc:
781 delta = dw_cfa_operand(insn);
782 delta *= cie->code_align_factor;
784 pr_debug("DW_CFA_advance_loc: pc: %#lx --> %#lx (delta: %#x)\n",
785 sf->pc - delta, sf->pc, delta);
789 reg = dw_cfa_operand(insn);
790 c_insn += dwarf_read_uleb128(ri, c_insn, &utmp,
795 offset = utmp * cie->data_align_factor;
796 set_rule_offset(rs, reg, DW_WHERE_CFAREL, offset);
797 pr_debug("DW_CFA_offset: reg: r%lu, offset(addr): %#lx (%ld)\n",
798 reg, offset, offset);
802 reg = dw_cfa_operand(insn);
804 if (!validate_regnum(rs, reg))
807 rs->reg[reg] = rs_initial->reg[reg];
808 pr_debug("DW_CFA_restore: reg: r%lu\n", reg);
814 pr_debug("DW_CFA_nop\n");
817 case DW_CFA_advance_loc1:
818 delta = read_mmap_data_u8(ri, c_insn++,
823 sf->pc += delta * cie->code_align_factor;
824 pr_debug("DW_CFA_advance_loc1: pc: %#lx --> %#lx (delta: %#lx)\n",
825 sf->pc - delta * cie->code_align_factor, sf->pc,
826 delta * cie->code_align_factor);
829 case DW_CFA_advance_loc2:
830 delta = read_mmap_data_u16(ri, (u16 *)c_insn,
836 sf->pc += delta * cie->code_align_factor;
837 pr_debug("DW_CFA_advance_loc2: pc: %#lx --> %#lx (delta: %#lx)\n",
838 sf->pc - delta * cie->code_align_factor, sf->pc,
839 delta * cie->code_align_factor);
842 case DW_CFA_advance_loc4:
843 delta = read_mmap_data_u32(ri, (u32 *)c_insn,
849 sf->pc += delta * cie->code_align_factor;
850 pr_debug("DW_CFA_advance_loc4: pc: %#lx --> %#lx (delta: %#lx)\n",
851 sf->pc - delta * cie->code_align_factor, sf->pc,
852 delta * cie->code_align_factor);
855 case DW_CFA_offset_extended:
856 c_insn += dwarf_read_uleb128(ri, c_insn, &utmp,
862 c_insn += dwarf_read_uleb128(ri, c_insn, &utmp,
867 offset = utmp * cie->data_align_factor;
868 pr_debug("DW_CFA_offset_extended: reg: r%lu, offset: %#lx\n",
872 case DW_CFA_restore_extended:
873 c_insn += dwarf_read_uleb128(ri, c_insn, ®,
878 pr_debug("DW_CFA_restore_extended: reg: r%lu\n", reg);
881 case DW_CFA_undefined:
882 c_insn += dwarf_read_uleb128(ri, c_insn, ®,
887 set_rule(rs, reg, DW_WHERE_UNDEF, 0);
888 pr_debug("DW_CFA_undefined: reg: r%lu\n", reg);
892 c_insn += dwarf_read_uleb128(ri, c_insn, &utmp,
897 rs->cfa_register = utmp;
898 c_insn += dwarf_read_uleb128(ri, c_insn, &utmp,
903 rs->cfa_offset = utmp;
904 pr_debug("DW_CFA_def_cfa: cfa_register: r%u, cfa_offset: %ld (%#lx)\n",
905 rs->cfa_register, rs->cfa_offset,
909 case DW_CFA_def_cfa_register:
910 c_insn += dwarf_read_uleb128(ri, c_insn, &utmp,
915 rs->cfa_register = utmp;
916 pr_debug("DW_CFA_def_cfa_register: cfa_register: r%u\n",
920 case DW_CFA_def_cfa_offset:
921 c_insn += dwarf_read_uleb128(ri, c_insn, &utmp,
926 rs->cfa_offset = utmp;
927 pr_debug("DW_CFA_def_cfa_offset: cfa_offset: %ld (%#lx)\n",
928 rs->cfa_offset, rs->cfa_offset);
931 case DW_CFA_def_cfa_expression:
932 c_insn += dwarf_read_uleb128(ri, c_insn, &utmp,
939 rs->cfa_expr = c_insn;
940 rs->cfa_expr_len = expr_len;
941 rs->cfa_how = DW_CFA_EXP;
944 pr_debug("DW_CFA_def_cfa_expression: expr_len: %#x\n",
948 case DW_CFA_expression:
949 c_insn += dwarf_read_uleb128(ri, c_insn, ®,
954 set_rule_exp(rs, reg, DW_WHERE_EXPR, c_insn);
956 c_insn += dwarf_read_uleb128(ri, c_insn, &utmp,
963 pr_debug("DW_CFA_expression: reg: r%lu\n", reg);
966 case DW_CFA_offset_extended_sf:
967 c_insn += dwarf_read_uleb128(ri, c_insn, ®,
972 c_insn += dwarf_read_sleb128(ri, c_insn, &stmp,
977 offset = stmp * cie->data_align_factor;
978 set_rule_offset(rs, reg, DW_WHERE_CFAREL, offset);
979 pr_debug("DW_CFA_offset_extended_sf: reg: r%lu, offset: %#lx\n",
983 case DW_CFA_val_offset:
984 c_insn += dwarf_read_uleb128(ri, c_insn, ®,
989 c_insn += dwarf_read_uleb128(ri, c_insn, &utmp,
994 offset = utmp * cie->data_align_factor;
995 set_rule_offset(rs, reg, DW_WHERE_VAL_OFFSET, offset);
996 pr_debug("DW_CFA_val_offset: reg: r%lu, offset(addr): %#lx\n",
1000 case DW_CFA_val_offset_sf:
1001 c_insn += dwarf_read_uleb128(ri, c_insn, ®,
1006 c_insn += dwarf_read_sleb128(ri, c_insn, &stmp,
1011 offset = stmp * cie->data_align_factor;
1012 set_rule_offset(rs, reg, DW_WHERE_VAL_OFFSET, offset);
1013 pr_debug("DW_CFA_val_offset_sf: reg: r%lu, offset(addr): %#lx\n",
1017 case DW_CFA_GNU_args_size:
1018 c_insn += dwarf_read_uleb128(ri, c_insn, &utmp,
1023 pr_debug("DW_CFA_GNU_args_size: offset: %#lx\n", utmp);
1026 case DW_CFA_GNU_negative_offset_extended:
1027 c_insn += dwarf_read_uleb128(ri, c_insn, ®,
1032 c_insn += dwarf_read_uleb128(ri, c_insn, &utmp,
1037 offset = utmp * cie->data_align_factor;
1038 set_rule_offset(rs, reg, DW_WHERE_CFAREL, -offset);
1039 pr_debug("DW_CFA_GNU_negative_offset_extended: reg: r%lu, offset: %#lx\n",
1043 case DW_CFA_remember_state:
1044 pr_debug("DW_CFA_remember_state\n");
1046 if (cpu_ctx->depth >= DW_MAX_RS_STACK_DEPTH) {
1047 pr_warn_once("error: rs stack was overflowed\n");
1051 rs_stack[cpu_ctx->depth++] = *rs;
1054 case DW_CFA_restore_state:
1055 pr_debug("DW_CFA_restore_state\n");
1057 if (cpu_ctx->depth == 0) {
1058 pr_warn_once("error: rs stack error\n");
1062 *rs = rs_stack[--cpu_ctx->depth];
1065 case DW_CFA_def_cfa_sf:
1066 c_insn += dwarf_read_uleb128(ri, c_insn, &utmp,
1071 c_insn += dwarf_read_sleb128(ri, c_insn, &stmp,
1076 rs->cfa_register = utmp;
1077 rs->cfa_offset = stmp * cie->data_align_factor;
1078 rs->cfa_how = DW_CFA_REG_OFFSET;
1080 pr_debug("DW_CFA_def_cfa_sf: cfa_register: r%u, cfa_offset: %ld (%#lx)\n",
1081 rs->cfa_register, rs->cfa_offset,
1085 case DW_CFA_def_cfa_offset_sf:
1086 c_insn += dwarf_read_sleb128(ri, c_insn, &stmp,
1091 rs->cfa_offset = stmp * cie->data_align_factor;
1092 pr_debug("DW_CFA_def_cfa_offset_sf: cfa_offset: %ld (%#lx)\n",
1093 rs->cfa_offset, rs->cfa_offset);
1096 case DW_CFA_same_value:
1097 c_insn += dwarf_read_uleb128(ri, c_insn, ®,
1102 set_rule(rs, reg, DW_WHERE_SAME, 0);
1103 pr_debug("DW_CFA_same_value: reg: r%lu\n", reg);
1106 case DW_CFA_val_expression:
1107 c_insn += dwarf_read_uleb128(ri, c_insn, ®,
1112 set_rule_exp(rs, reg, DW_WHERE_VAL_EXPR, c_insn);
1113 c_insn += dwarf_read_uleb128(ri, c_insn, &utmp,
1119 pr_debug("DW_CFA_val_expression: reg: r%lu\n", reg);
1123 pr_warn_once("warning: unhandled dwarf instr %#x\n",
1133 decode_cie_entry(struct ex_region_info *ri,
1135 unsigned char *entry,
1141 unsigned char *p, *end, *aug;
1142 unsigned int secid, cie_id;
1143 unsigned int cie_version, id, len, max_len;
1145 secid = get_secid_frame(is_eh);
1148 end = entry + length;
1152 id = read_mmap_data_u32(ri, (u32 *)p, secid, &err);
1158 cie_id = is_eh ? 0 : DW_CIE_ID;
1160 pr_err_once("error: incorrect cie_id");
1161 return -QUADD_URC_TBL_IS_CORRUPT;
1164 cie_version = read_mmap_data_u8(ri, p++, secid, &err);
1168 if (cie_version != 1 && cie_version != 3) {
1169 pr_err_once("error: wrong cie_version: %u\n", cie_version);
1170 return -QUADD_URC_TBL_IS_CORRUPT;
1174 return -QUADD_URC_TBL_IS_CORRUPT;
1176 max_len = end - p - 1;
1177 len = strnlen((const char *)p, max_len);
1179 return -QUADD_URC_TBL_IS_CORRUPT;
1181 cie->aug_string = p;
1184 pr_debug("aug_string: %s\n", cie->aug_string);
1186 p += dwarf_read_uleb128(ri, p, &cie->code_align_factor,
1191 p += dwarf_read_sleb128(ri, p, &cie->data_align_factor,
1196 if (cie_version == 1) {
1197 cie->retaddr_reg = read_mmap_data_u8(ri, p++,
1203 p += dwarf_read_uleb128(ri, p, &utmp,
1208 cie->retaddr_reg = utmp;
1211 pr_debug("address column: %u\n", cie->retaddr_reg);
1213 aug = cie->aug_string;
1216 cie->initial_insn = NULL;
1217 cie->initial_insn_len = 0;
1220 p += dwarf_read_uleb128(ri, p, &cie->aug_size,
1225 cie->initial_insn = p + cie->aug_size;
1231 cie->fde_encoding = 0;
1232 cie->lsda_encoding = DW_EH_PE_omit;
1233 cie->personality = NULL;
1235 while (*aug != '\0') {
1237 return -QUADD_URC_TBL_IS_CORRUPT;
1240 cie->lsda_encoding =
1241 read_mmap_data_u8(ri, p++,
1248 } else if (*aug == 'R') {
1250 read_mmap_data_u8(ri, p++,
1257 pr_debug("fde_encoding: %#x\n", cie->fde_encoding);
1258 } else if (*aug == 'P') {
1261 unsigned char handler_encoding;
1262 unsigned long personality;
1264 handler_encoding = *p++;
1266 pcrel_base = (void *)
1267 mmap_addr_to_ex_addr((unsigned long)p,
1271 cnt = dwarf_read_encoded_value(ri, p, pcrel_base,
1276 pr_err_once("%s: error: personality routine\n",
1282 pr_debug("personality: %#lx\n", personality);
1283 cie->personality = (void *)personality;
1285 } else if (*aug == 'S') {
1287 pr_debug("%s: aug: S\n", __func__);
1289 pr_warn_once("%s: warning: unknown aug\n", __func__);
1290 return -QUADD_URC_UNHANDLED_INSTRUCTION;
1295 pr_err_once("%s: error: cie\n", __func__);
1296 return -QUADD_URC_TBL_IS_CORRUPT;
1302 if (!cie->initial_insn)
1303 cie->initial_insn = p;
1305 cie->initial_insn_len = end - cie->initial_insn;
1311 decode_fde_entry(struct ex_region_info *ri,
1313 unsigned char *entry,
1320 unsigned char *p, *end, *pcrel_base;
1321 struct dw_cie *cie = fde->cie;
1323 secid = get_secid_frame(is_eh);
1326 end = entry + length;
1331 pcrel_base = (unsigned char *)
1332 mmap_addr_to_ex_addr((unsigned long)p, ri, secid);
1334 count = dwarf_read_encoded_value(ri, p, pcrel_base,
1335 &fde->initial_location,
1343 fde->address_range = read_mmap_data_u32(ri, (u32 *)p,
1350 if (fde->initial_location < ri->vm_start)
1351 fde->initial_location += ri->vm_start;
1353 pr_debug("pcrel_base: %p\n", pcrel_base);
1354 pr_debug("init location: %#lx\n", fde->initial_location);
1355 pr_debug("address_range: %#lx\n", fde->address_range);
1358 p += dwarf_read_uleb128(ri, p, &utmp,
1367 pr_err_once("%s: error: incorrect fde\n", __func__);
1368 return -QUADD_URC_TBL_IS_CORRUPT;
1371 fde->insn_length = end - p;
1373 if (fde->insn_length > 0)
1374 fde->instructions = p;
1376 fde->instructions = NULL;
1381 static const struct dw_fde_table *
1382 dwarf_bst_find_idx(unsigned long data_base,
1383 struct dw_fde_table *fde_table,
1384 unsigned long length,
1387 unsigned long initial_loc;
1388 struct dw_fde_table *start, *stop;
1389 struct dw_fde_table *mid = NULL;
1391 if (unlikely(!length))
1395 stop = start + length - 1;
1397 initial_loc = dw_bst_get_initial_loc(start, data_base);
1398 if (addr < initial_loc)
1401 initial_loc = dw_bst_get_initial_loc(stop, data_base);
1402 if (addr >= initial_loc)
1405 while (start < stop - 1) {
1406 mid = start + ((stop - start) >> 1);
1408 initial_loc = dw_bst_get_initial_loc(mid, data_base);
1410 if (addr < initial_loc)
1419 static struct dw_fde_table *
1420 dwarf_get_bs_table(struct ex_region_info *ri,
1422 unsigned long length,
1423 unsigned long data_base,
1424 unsigned long *nr_entries,
1427 int count, secid_hdr;
1428 unsigned char *p, *end;
1429 struct dw_fde_table *bst;
1430 unsigned long fde_count, frame_ptr;
1431 struct dw_eh_frame_hdr *hdr = data;
1433 if (length <= sizeof(*hdr))
1436 end = data + length;
1438 pr_debug("hdr: %p\n", hdr);
1440 secid_hdr = get_secid_frame_hdr(is_eh);
1442 if (hdr->version != 1) {
1443 pr_warn_once("warning: unknown eh hdr format\n");
1446 p = (unsigned char *)(hdr + 1);
1448 if (hdr->eh_frame_ptr_enc != DW_EH_PE_omit) {
1449 count = dwarf_read_encoded_value(ri, p, (void *)data_base,
1451 hdr->eh_frame_ptr_enc,
1459 if (hdr->fde_count_enc == DW_EH_PE_omit)
1462 count = dwarf_read_encoded_value(ri, p, (void *)data_base,
1463 &fde_count, hdr->fde_count_enc,
1473 if (fde_count * sizeof(*bst) != end - p)
1476 if (hdr->table_enc != (DW_EH_PE_datarel | DW_EH_PE_sdata4)) {
1477 pr_warn_once("warning: unknown eh hdr format\n");
1481 bst = (struct dw_fde_table *)p;
1482 *nr_entries = fde_count;
1484 pr_debug("bst: %lu fde entries\n", fde_count);
1490 dwarf_decode_fde_cie(struct ex_region_info *ri,
1491 unsigned char *fde_p,
1499 unsigned char *cie_p;
1500 unsigned long cie_pointer, length;
1501 unsigned char *frame_start;
1502 unsigned long frame_len, addr;
1503 struct extab_info *ti;
1505 secid = get_secid_frame(is_eh);
1506 ti = &ri->ex_sec[secid];
1510 frame_start = (unsigned char *)
1511 ex_addr_to_mmap_addr(addr, ri, secid);
1513 frame_len = ti->length;
1515 pr_debug("frame: %p - %p\n",
1516 frame_start, frame_start + frame_len);
1520 length = read_mmap_data_u32(ri, p++, secid, &err);
1524 if (length == 0xffffffff) {
1525 pr_warn_once("warning: 64-bit frame is not supported\n");
1526 return -QUADD_URC_UNHANDLED_INSTRUCTION;
1529 fde->offset = fde_p - frame_start;
1530 fde->length = length + sizeof(u32);
1532 pr_debug("FDE: fde_p: %p, offset: %#lx, len: %#lx\n",
1533 fde_p, fde->offset, fde->length);
1535 cie_pointer = read_mmap_data_u32(ri, p, secid, &err);
1539 fde->cie_pointer = cie_pointer;
1541 cie_p = is_eh ? (unsigned char *)p - cie_pointer :
1542 frame_start + cie_pointer;
1544 length = read_mmap_data_u32(ri, (u32 *)cie_p,
1549 if (length == 0xffffffff) {
1550 pr_warn_once("warning: 64-bit frame is not supported\n");
1551 return -QUADD_URC_UNHANDLED_INSTRUCTION;
1554 cie->offset = cie_p - frame_start;
1555 cie->length = length + sizeof(u32);
1557 pr_debug("CIE: cie_p: %p, offset: %#lx, len: %#lx\n",
1558 cie_p, cie->offset, cie->length);
1560 err = decode_cie_entry(ri, cie, cie_p, cie->length, is_eh);
1566 err = decode_fde_entry(ri, fde, fde_p, fde->length, is_eh);
1574 dwarf_find_fde(struct ex_region_info *ri,
1576 unsigned long length,
1581 int secid, secid_hdr;
1582 const struct dw_fde_table *fi;
1583 unsigned long fde_count = 0, data_base;
1584 unsigned long fde_addr, init_loc;
1585 struct dw_fde_table *bst;
1586 struct extab_info *ti;
1588 secid = get_secid_frame(is_eh);
1589 secid_hdr = get_secid_frame_hdr(is_eh);
1591 ti = &ri->ex_sec[secid_hdr];
1592 data_base = ti->addr;
1594 bst = dwarf_get_bs_table(ri, data, length, data_base,
1596 if (!bst || fde_count == 0) {
1597 pr_warn_once("warning: bs_table\n");
1601 fi = &bst[fde_count - 1];
1602 init_loc = dw_bst_get_initial_loc(fi, data_base);
1604 pr_debug("pc: %#lx, last bst init_loc: %#lx", pc, init_loc);
1606 if (pc >= init_loc) {
1607 unsigned long start, end;
1608 struct extab_info *ti = &ri->ex_sec[secid];
1610 fde_addr = dw_bst_get_fde_addr(fi, data_base);
1611 fde_addr = ex_addr_to_mmap_addr(fde_addr, ri,
1615 return (void *)fde_addr;
1617 if (ti->tf_end > 0) {
1618 start = ti->tf_start;
1624 err = dwarf_decode_fde_cie(ri, (void *)fde_addr,
1629 start = fde.initial_location;
1630 end = start + fde.address_range;
1632 quadd_unwind_set_tail_info(ri->vm_start, secid,
1636 pr_debug("pc: %#lx, last bst entry: %#lx - %#lx",
1639 return (pc >= start && pc < end) ?
1640 (void *)fde_addr : NULL;
1643 fi = dwarf_bst_find_idx(data_base, bst, fde_count, pc);
1647 fde_addr = dw_bst_get_fde_addr(fi, data_base);
1648 fde_addr = ex_addr_to_mmap_addr(fde_addr, ri, secid);
1650 return (void *)fde_addr;
1654 __is_fde_entry_exist(struct ex_region_info *ri, unsigned long addr, int is_eh)
1657 unsigned char *fde_p;
1658 struct extab_info *ti;
1659 unsigned char *hdr_start;
1660 unsigned long hdr_len, a;
1662 secid_hdr = get_secid_frame_hdr(is_eh);
1664 ti = &ri->ex_sec[secid_hdr];
1668 hdr_start = (unsigned char *)
1669 ex_addr_to_mmap_addr(a, ri, secid_hdr);
1671 hdr_len = ti->length;
1673 fde_p = dwarf_find_fde(ri, hdr_start, hdr_len, addr, is_eh);
1675 return fde_p ? 1 : 0;
1679 is_fde_entry_exist(struct ex_region_info *ri,
1687 if (is_frame_present(ri, 1)) {
1688 if (__is_fde_entry_exist(ri, addr, 1))
1692 if (is_frame_present(ri, 0)) {
1693 if (__is_fde_entry_exist(ri, addr, 0))
1697 return (*is_eh || *is_debug) ? 1 : 0;
1701 dwarf_decode(struct ex_region_info *ri,
1709 unsigned char *fde_p;
1710 unsigned char *hdr_start;
1711 unsigned long hdr_len, addr;
1712 struct extab_info *ti;
1714 secid_hdr = get_secid_frame_hdr(is_eh);
1715 ti = &ri->ex_sec[secid_hdr];
1719 hdr_start = (unsigned char *)
1720 ex_addr_to_mmap_addr(addr, ri, secid_hdr);
1722 hdr_len = ti->length;
1724 pr_debug("eh frame hdr: %p - %p\n",
1725 hdr_start, hdr_start + hdr_len);
1727 fde_p = dwarf_find_fde(ri, hdr_start, hdr_len, pc, is_eh);
1729 return -QUADD_URC_IDX_NOT_FOUND;
1731 err = dwarf_decode_fde_cie(ri, fde_p, cie, fde, is_eh);
1735 if (pc < fde->initial_location ||
1736 pc >= fde->initial_location + fde->address_range) {
1737 pr_debug("pc is not in range: %#lx - %#lx\n",
1738 fde->initial_location,
1739 fde->initial_location + fde->address_range);
1740 return -QUADD_URC_IDX_NOT_FOUND;
1746 static long def_cfa(struct stackframe *sf, struct regs_state *rs)
1748 int reg = rs->cfa_register;
1751 if (reg >= QUADD_NUM_REGS)
1752 return -QUADD_URC_TBL_IS_CORRUPT;
1754 pr_debug("r%d --> cfa (%#lx)\n", reg, sf->cfa);
1755 sf->cfa = sf->vregs[reg];
1758 sf->cfa += rs->cfa_offset;
1759 pr_debug("cfa += %#lx (%#lx)\n", rs->cfa_offset, sf->cfa);
1765 unwind_frame(struct ex_region_info *ri,
1766 struct stackframe *sf,
1767 struct vm_area_struct *vma_sp,
1772 unsigned char *insn_end;
1773 unsigned long addr, return_addr, val, user_reg_size;
1776 unsigned long pc = sf->pc;
1777 struct regs_state *rs, *rs_initial;
1778 int mode = sf->mode;
1780 err = dwarf_decode(ri, &cie, &fde, pc, is_eh);
1784 sf->pc = fde.initial_location;
1787 rs_initial = &sf->rs_initial;
1789 rs->cfa_register = -1;
1790 rs_initial->cfa_register = -1;
1792 set_rule(rs, regnum_lr(mode), DW_WHERE_UNDEF, 0);
1794 if (cie.initial_insn) {
1795 insn_end = cie.initial_insn + cie.initial_insn_len;
1796 err = dwarf_cfa_exec_insns(ri, cie.initial_insn,
1797 insn_end, &cie, sf, pc, is_eh);
1802 memcpy(rs_initial, rs, sizeof(*rs));
1804 if (fde.instructions) {
1805 insn_end = fde.instructions + fde.insn_length;
1806 err = dwarf_cfa_exec_insns(ri, fde.instructions,
1807 insn_end, fde.cie, sf, pc, is_eh);
1812 pr_debug("mode: %s\n", (mode == DW_MODE_ARM32) ? "arm32" : "arm64");
1813 pr_debug("initial cfa: %#lx\n", sf->cfa);
1815 user_reg_size = get_user_reg_size(mode);
1817 err = def_cfa(sf, rs);
1821 pr_debug("pc: %#lx, lr: %#lx\n", sf->pc, sf->vregs[regnum_lr(mode)]);
1823 pr_debug("sp: %#lx, fp: %#lx, fp_thumb: %#lx\n",
1824 sf->vregs[regnum_sp(mode)],
1825 sf->vregs[regnum_fp(mode)],
1826 sf->vregs[ARM32_FP_THUMB]);
1828 pr_debug("lr rule: %#lx/%ld (where: %u)\n",
1829 rs->reg[regnum_lr(mode)].loc.reg,
1830 rs->reg[regnum_lr(mode)].loc.offset,
1831 rs->reg[regnum_lr(mode)].where);
1833 pr_debug("fp rule: %#lx/%ld (where: %u)\n",
1834 rs->reg[regnum_fp(mode)].loc.reg,
1835 rs->reg[regnum_fp(mode)].loc.offset,
1836 rs->reg[regnum_fp(mode)].where);
1838 pr_debug("fp_thumb rule: %#lx/%ld (where: %u)\n",
1839 rs->reg[ARM32_FP_THUMB].loc.reg,
1840 rs->reg[ARM32_FP_THUMB].loc.offset,
1841 rs->reg[ARM32_FP_THUMB].where);
1843 pr_debug("cfa_offset: %ld (%#lx)\n",
1844 rs->cfa_offset, rs->cfa_offset);
1845 pr_debug("cfa_register: %u\n", rs->cfa_register);
1846 pr_debug("new cfa: %#lx\n", sf->cfa);
1848 for (i = 0; i < QUADD_NUM_REGS; i++) {
1849 switch (rs->reg[i].where) {
1850 case DW_WHERE_UNDEF:
1856 case DW_WHERE_CFAREL:
1857 addr = sf->cfa + rs->reg[i].loc.offset;
1859 if (!validate_stack_addr(addr, vma_sp, user_reg_size))
1860 return -QUADD_URC_SP_INCORRECT;
1862 if (mode == DW_MODE_ARM32)
1863 err = read_user_data((u32 __user *)addr, val);
1865 err = read_user_data((unsigned long __user *)
1872 pr_debug("[r%d] DW_WHERE_CFAREL: new val: %#lx\n",
1878 pr_err_once("[r%d] error: unsupported rule\n",
1884 return_addr = sf->vregs[regnum_lr(mode)];
1885 pr_debug("return_addr: %#lx\n", return_addr);
1887 if (!validate_pc_addr(return_addr, user_reg_size))
1888 return -QUADD_URC_PC_INCORRECT;
1890 sf->pc = return_addr;
1891 sf->vregs[regnum_sp(mode)] = sf->cfa;
1897 unwind_backtrace(struct quadd_callchain *cc,
1898 struct ex_region_info *ri,
1899 struct stackframe *sf,
1900 struct vm_area_struct *vma_sp,
1901 struct task_struct *task)
1903 unsigned long user_reg_size;
1904 struct ex_region_info ri_new;
1905 unsigned int unw_type;
1906 int is_eh = 1, mode = sf->mode;
1908 cc->urc_dwarf = QUADD_URC_FAILURE;
1909 user_reg_size = get_user_reg_size(mode);
1914 int __is_eh, __is_debug;
1915 struct vm_area_struct *vma_pc;
1916 unsigned long addr, where = sf->pc;
1917 struct mm_struct *mm = task->mm;
1922 sp = sf->vregs[regnum_sp(mode)];
1924 if (!validate_stack_addr(sp, vma_sp, user_reg_size)) {
1925 cc->urc_dwarf = QUADD_URC_SP_INCORRECT;
1929 vma_pc = find_vma(mm, sf->pc);
1933 addr = ri->vm_start;
1935 if (!is_vma_addr(addr, vma_pc, user_reg_size)) {
1936 err = quadd_get_dw_frames(vma_pc->vm_start, &ri_new);
1938 cc->urc_dwarf = QUADD_URC_TBL_NOT_EXIST;
1942 pr_debug("ri: %#lx ---> %#lx",
1943 ri->vm_start, ri_new.vm_start);
1948 if (!is_fde_entry_exist(ri, sf->pc, &__is_eh, &__is_debug)) {
1949 pr_debug("eh/debug fde entries are not existed\n");
1950 cc->urc_dwarf = QUADD_URC_IDX_NOT_FOUND;
1953 pr_debug("is_eh: %d, is_debug: %d\n", __is_eh, __is_debug);
1963 err = unwind_frame(ri, sf, vma_sp, is_eh);
1965 if (__is_eh && __is_debug) {
1968 err = unwind_frame(ri, sf, vma_sp, is_eh);
1970 cc->urc_dwarf = -err;
1974 cc->urc_dwarf = -err;
1979 unw_type = is_eh ? QUADD_UNW_TYPE_DWARF_EH :
1980 QUADD_UNW_TYPE_DWARF_DF;
1982 pr_debug("[%s]: function at [<%08lx>] from [<%08lx>]\n",
1983 is_eh ? "eh" : "debug", where, sf->pc);
1985 cc->curr_sp = sf->vregs[regnum_sp(mode)];
1987 cc->curr_fp = sf->vregs[regnum_fp(mode)];
1988 if (mode == DW_MODE_ARM32)
1989 cc->curr_fp_thumb = sf->vregs[ARM32_FP_THUMB];
1991 cc->curr_pc = sf->pc;
1993 nr_added = quadd_callchain_store(cc, sf->pc, unw_type);
2000 quadd_is_ex_entry_exist_dwarf(struct pt_regs *regs,
2002 struct task_struct *task)
2005 int is_eh, is_debug;
2006 struct ex_region_info ri;
2007 struct vm_area_struct *vma;
2008 struct mm_struct *mm = task->mm;
2013 vma = find_vma(mm, addr);
2017 err = quadd_get_dw_frames(vma->vm_start, &ri);
2021 return is_fde_entry_exist(&ri, addr, &is_eh, &is_debug);
2025 quadd_get_user_cc_dwarf(struct pt_regs *regs,
2026 struct quadd_callchain *cc,
2027 struct task_struct *task)
2030 int i, mode, nr_prev = cc->nr;
2031 unsigned long ip, lr, sp, fp, fp_thumb;
2032 struct vm_area_struct *vma, *vma_sp;
2033 struct mm_struct *mm = task->mm;
2034 struct ex_region_info ri;
2035 struct stackframe sf;
2036 struct dwarf_cpu_context *cpu_ctx = this_cpu_ptr(ctx.cpu_ctx);
2041 if (cc->urc_dwarf == QUADD_URC_LEVEL_TOO_DEEP)
2044 cc->urc_dwarf = QUADD_URC_FAILURE;
2050 fp_thumb = cc->curr_fp_thumb;
2053 ip = instruction_pointer(regs);
2054 lr = quadd_user_link_register(regs);
2055 sp = quadd_user_stack_pointer(regs);
2058 if (compat_user_mode(regs)) {
2059 fp = regs->compat_usr(11);
2060 fp_thumb = regs->compat_usr(7);
2062 fp = regs->regs[29];
2067 fp_thumb = regs->ARM_r7;
2072 if (compat_user_mode(regs))
2073 mode = DW_MODE_ARM32;
2075 mode = DW_MODE_ARM64;
2077 mode = DW_MODE_ARM32;
2080 pr_debug("%s: pc: %#lx, lr: %#lx\n", __func__, ip, lr);
2081 pr_debug("%s: sp: %#lx, fp: %#lx, fp_thumb: %#lx\n",
2082 __func__, sp, fp, fp_thumb);
2084 sf.vregs[regnum_lr(mode)] = lr;
2087 sf.vregs[regnum_sp(mode)] = sp;
2088 sf.vregs[regnum_fp(mode)] = fp;
2090 if (mode == DW_MODE_ARM32)
2091 sf.vregs[ARM32_FP_THUMB] = fp_thumb;
2093 cpu_ctx->dw_ptr_size = (mode == DW_MODE_ARM32) ?
2094 sizeof(u32) : sizeof(u64);
2099 for (i = 0; i < QUADD_NUM_REGS; i++)
2100 set_rule(&sf.rs, i, DW_WHERE_UNDEF, 0);
2102 vma = find_vma(mm, ip);
2106 vma_sp = find_vma(mm, sp);
2110 err = quadd_get_dw_frames(vma->vm_start, &ri);
2112 cc->urc_dwarf = QUADD_URC_TBL_NOT_EXIST;
2116 unwind_backtrace(cc, &ri, &sf, vma_sp, task);
2118 pr_debug("%s: mode: %s, cc->nr: %d --> %d\n", __func__,
2119 (mode == DW_MODE_ARM32) ? "arm32" : "arm64",
2125 int quadd_dwarf_unwind_start(void)
2127 if (!atomic_cmpxchg(&ctx.started, 0, 1)) {
2128 ctx.cpu_ctx = alloc_percpu(struct dwarf_cpu_context);
2130 atomic_set(&ctx.started, 0);
2138 void quadd_dwarf_unwind_stop(void)
2140 if (atomic_cmpxchg(&ctx.started, 1, 0))
2141 free_percpu(ctx.cpu_ctx);
2144 int quadd_dwarf_unwind_init(void)
2146 atomic_set(&ctx.started, 0);