2 * drivers/misc/tegra-profiler/dwarf_unwind.c
4 * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 #include <linux/sched.h>
21 #include <linux/slab.h>
22 #include <linux/uaccess.h>
23 #include <linux/err.h>
25 #include <asm/unaligned.h>
27 #include <linux/tegra_profiler.h>
30 #include "backtrace.h"
31 #include "eh_unwind.h"
33 #include "dwarf_unwind.h"
36 DW_WHERE_UNDEF, /* register isn't saved at all */
37 DW_WHERE_SAME, /* register has same value as in prev. frame */
38 DW_WHERE_CFAREL, /* register saved at CFA-relative address */
39 DW_WHERE_REG, /* register saved in another register */
40 DW_WHERE_EXPR, /* register saved */
41 DW_WHERE_VAL_OFFSET, /* value offset */
42 DW_WHERE_VAL_EXPR, /* register has computed value */
45 #define QUADD_AARCH64_REGISTERS 32
46 #define QUADD_AARCH32_REGISTERS 16
48 #define QUADD_NUM_REGS QUADD_AARCH64_REGISTERS
73 const unsigned char *exp;
/*
 * Fixed-size preamble of the .eh_frame_hdr section (LSB spec).
 * Variable-length encoded fields (eh_frame pointer, fde count,
 * binary-search table) follow immediately after this header and are
 * parsed via the *_enc encoding bytes; see dwarf_get_bs_table(),
 * which starts at (hdr + 1).
 */
struct dw_eh_frame_hdr {
	unsigned char version;		/* must be 1 */
	unsigned char eh_frame_ptr_enc;	/* DW_EH_PE_* encoding of eh_frame ptr */
	unsigned char fde_count_enc;	/* DW_EH_PE_* encoding of fde count */
	unsigned char table_enc;	/* DW_EH_PE_* encoding of bst entries */
};
100 struct reg_info reg[QUADD_NUM_REGS];
105 unsigned char *cfa_expr;
106 unsigned int cfa_expr_len;
111 #define DW_MAX_RS_STACK_DEPTH 8
113 struct dwarf_cpu_context {
114 struct regs_state rs_stack[DW_MAX_RS_STACK_DEPTH];
120 struct quadd_dwarf_context {
121 struct dwarf_cpu_context __percpu *cpu_ctx;
127 unsigned long vregs[QUADD_NUM_REGS];
129 struct regs_state rs;
130 struct regs_state rs_initial;
138 unsigned long offset;
139 unsigned long length;
141 unsigned char *aug_string;
142 unsigned long aug_size;
144 unsigned char fde_encoding;
145 unsigned char lsda_encoding;
147 unsigned long code_align_factor;
148 long data_align_factor;
150 unsigned int initial_insn_len;
151 unsigned char *initial_insn;
154 unsigned int retaddr_reg;
161 unsigned long offset;
162 unsigned long length;
164 unsigned long cie_pointer;
167 unsigned long initial_location;
168 unsigned long address_range;
170 unsigned int insn_length;
171 unsigned char *instructions;
/*
 * Safely read one value from user memory in a non-sleeping context:
 * page faults are disabled around __get_user(), so an address that is
 * not resident fails immediately instead of faulting the page in.
 * Evaluates (GCC statement-expression) to 0 on success or
 * -QUADD_URC_EACCESS on failure.
 *
 * NOTE(review): the statement-expression framing was lost in
 * extraction; it is reconstructed from the visible body and the
 * call site `err = read_user_data(...)` — confirm against upstream.
 */
#define read_user_data(addr, retval)				\
({								\
	long ret;						\
								\
	pagefault_disable();					\
	ret = __get_user(retval, addr);				\
	pagefault_enable();					\
								\
	if (ret) {						\
		pr_debug("%s: failed for address: %p\n",	\
			 __func__, addr);			\
		ret = -QUADD_URC_EACCESS;			\
	}							\
								\
	ret;							\
})
199 static struct quadd_dwarf_context ctx;
201 static inline int regnum_sp(int mode)
203 return (mode == DW_MODE_ARM32) ?
207 static inline int regnum_fp(int mode)
209 return (mode == DW_MODE_ARM32) ?
213 static inline int regnum_lr(int mode)
215 return (mode == DW_MODE_ARM32) ?
219 static inline unsigned long
220 get_user_reg_size(int mode)
222 return (mode == DW_MODE_ARM32) ?
223 sizeof(u32) : sizeof(u64);
227 validate_addr(struct ex_region_info *ri,
229 unsigned long nbytes,
232 struct extab_info *ti;
233 struct quadd_mmap_area *mmap;
234 unsigned long start, end;
238 ti = &ri->ex_sec[st];
240 start = (unsigned long)mmap->data + ti->mmap_offset;
241 end = start + ti->length;
243 if (unlikely(addr < start || addr > end - nbytes)) {
244 pr_err_once("%s: error: addr: %#lx, len: %ld, data: %#lx-%#lx\n",
245 __func__, addr, nbytes, start, end);
253 read_mmap_data_u8(struct ex_region_info *ri,
254 const u8 *addr, int st, long *err)
256 unsigned long a = (unsigned long)addr;
258 if (unlikely(!validate_addr(ri, a, sizeof(*addr), st))) {
259 *err = -QUADD_URC_EACCESS;
268 read_mmap_data_u16(struct ex_region_info *ri,
269 const u16 *addr, int st, long *err)
271 unsigned long a = (unsigned long)addr;
273 if (unlikely(!validate_addr(ri, a, sizeof(*addr), st))) {
274 *err = -QUADD_URC_EACCESS;
280 return get_unaligned(addr);
284 read_mmap_data_s16(struct ex_region_info *ri,
285 const s16 *addr, int st, long *err)
287 unsigned long a = (unsigned long)addr;
289 if (unlikely(!validate_addr(ri, a, sizeof(*addr), st))) {
290 *err = -QUADD_URC_EACCESS;
296 return get_unaligned(addr);
300 read_mmap_data_u32(struct ex_region_info *ri,
301 const u32 *addr, int st, long *err)
303 unsigned long a = (unsigned long)addr;
305 if (unlikely(!validate_addr(ri, a, sizeof(*addr), st))) {
306 *err = -QUADD_URC_EACCESS;
312 return get_unaligned(addr);
316 read_mmap_data_s32(struct ex_region_info *ri,
317 const s32 *addr, int st, long *err)
319 unsigned long a = (unsigned long)addr;
321 if (unlikely(!validate_addr(ri, a, sizeof(*addr), st))) {
322 *err = -QUADD_URC_EACCESS;
328 return get_unaligned(addr);
332 read_mmap_data_s64(struct ex_region_info *ri,
333 const s64 *addr, int st, long *err)
335 unsigned long a = (unsigned long)addr;
337 if (unlikely(!validate_addr(ri, a, sizeof(*addr), st))) {
338 *err = -QUADD_URC_EACCESS;
344 return get_unaligned(addr);
348 read_mmap_data_u64(struct ex_region_info *ri,
349 const u64 *addr, int st, long *err)
351 unsigned long a = (unsigned long)addr;
353 if (unlikely(!validate_addr(ri, a, sizeof(*addr), st))) {
354 *err = -QUADD_URC_EACCESS;
360 return get_unaligned(addr);
363 static inline unsigned long
364 ex_addr_to_mmap_addr(unsigned long addr,
365 struct ex_region_info *ri, int st)
367 unsigned long offset;
368 struct extab_info *ti;
370 ti = &ri->ex_sec[st];
371 offset = addr - ti->addr;
373 return ti->mmap_offset + offset + (unsigned long)ri->mmap->data;
376 static inline unsigned long
377 mmap_addr_to_ex_addr(unsigned long addr,
378 struct ex_region_info *ri, int st)
380 unsigned long offset;
381 struct extab_info *ti;
383 ti = &ri->ex_sec[st];
384 offset = addr - ti->mmap_offset - (unsigned long)ri->mmap->data;
386 return ti->addr + offset;
389 static inline int validate_regnum(struct regs_state *rs, int regnum)
391 if (unlikely(regnum >= ARRAY_SIZE(rs->reg))) {
392 pr_err_once("error: invalid reg: %d\n", regnum);
400 set_rule_offset(struct regs_state *rs, int regnum, int where, long offset)
404 if (!validate_regnum(rs, regnum))
407 r = &rs->reg[regnum];
410 r->loc.offset = offset;
414 set_rule_reg(struct regs_state *rs, int regnum, int where, unsigned long reg)
418 if (!validate_regnum(rs, regnum))
421 r = &rs->reg[regnum];
428 set_rule_exp(struct regs_state *rs, int regnum,
429 int where, const unsigned char *exp)
433 if (!validate_regnum(rs, regnum))
436 r = &rs->reg[regnum];
/* Shorthand for the common offset-style rule; see set_rule_offset(). */
static void
set_rule(struct regs_state *rs, int regnum, int where, long value)
{
	set_rule_offset(rs, regnum, where, value);
}
448 static inline unsigned long
449 dw_bst_get_initial_loc(const struct dw_fde_table *fi,
450 unsigned long data_base)
452 return data_base + fi->initial_loc;
455 static inline unsigned long
456 dw_bst_get_fde_addr(const struct dw_fde_table *fi,
457 unsigned long data_base)
459 return data_base + fi->fde;
462 static inline unsigned long
463 dwarf_read_uleb128(struct ex_region_info *ri,
469 unsigned long result;
478 byte = read_mmap_data_u8(ri, addr, st, err);
485 result |= (byte & 0x7f) << shift;
497 static inline unsigned long
498 dwarf_read_sleb128(struct ex_region_info *ri,
514 byte = read_mmap_data_u8(ri, addr, st, err);
519 result |= (byte & 0x7f) << shift;
527 num_bits = 8 * sizeof(result);
529 if ((shift < num_bits) && (byte & 0x40))
530 result |= (-1 << shift);
537 static inline unsigned int
538 dw_cfa_opcode(unsigned int insn)
543 static inline unsigned int
544 dw_cfa_operand(unsigned int insn)
550 dwarf_read_encoded_value(struct ex_region_info *ri,
557 int dw_word_size, count = 0;
558 long stmp = 0, err = 0;
559 unsigned long utmp, res = 0;
560 struct dwarf_cpu_context *cpu_ctx = this_cpu_ptr(ctx.cpu_ctx);
562 pr_debug("encoding: %#x\n", encoding);
564 dw_word_size = cpu_ctx->dw_word_size;
566 if (encoding == DW_EH_PE_omit) {
567 pr_debug("DW_EH_PE_omit\n");
571 } else if (encoding == DW_EH_PE_aligned) {
572 unsigned long aligned = ALIGN((unsigned long)addr,
575 pr_debug("DW_EH_PE_aligned\n");
577 if (dw_word_size == 4) {
578 *val = read_mmap_data_u32(ri, (u32 *)aligned, st, &err);
579 } else if (dw_word_size == 8) {
580 *val = read_mmap_data_u64(ri, (u64 *)aligned, st, &err);
582 pr_err_once("%s: error: encoding\n", __func__);
583 return -QUADD_URC_TBL_IS_CORRUPT;
592 switch (encoding & 0x0f) {
593 case DW_EH_PE_absptr:
594 pr_debug("%s: absptr encoding\n", __func__);
596 if (dw_word_size == 4) {
597 *val = read_mmap_data_u32(ri, (u32 *)addr, st, &err);
598 } else if (dw_word_size == 8) {
599 *val = read_mmap_data_u64(ri, (u64 *)addr, st, &err);
601 pr_err_once("error: wrong dwarf size\n");
602 return -QUADD_URC_UNHANDLED_INSTRUCTION;
610 case DW_EH_PE_sdata2:
611 case DW_EH_PE_udata2:
612 pr_debug("encoding: DW_EH_PE_sdata2\n");
613 stmp = read_mmap_data_s16(ri, (s16 *)addr, st, &err);
617 count += sizeof(s16);
620 case DW_EH_PE_sdata4:
621 case DW_EH_PE_udata4:
622 pr_debug("encoding: DW_EH_PE_udata4/sdata4\n");
623 stmp = read_mmap_data_s32(ri, (s32 *)addr, st, &err);
627 count += sizeof(s32);
630 case DW_EH_PE_sdata8:
631 case DW_EH_PE_udata8:
632 pr_debug("encoding: DW_EH_PE_udata8\n");
633 stmp = read_mmap_data_s64(ri, (s64 *)addr, st, &err);
637 count += sizeof(s64);
640 case DW_EH_PE_uleb128:
641 pr_debug("encoding: DW_EH_PE_uleb128\n");
642 count += dwarf_read_uleb128(ri, addr, &utmp, st, &err);
649 case DW_EH_PE_sleb128:
650 pr_debug("encoding: DW_EH_PE_sleb128\n");
651 count += dwarf_read_sleb128(ri, addr, &stmp, st, &err);
658 pr_warn_once("%s: warning: encoding: %#x\n",
659 __func__, encoding & 0x0f);
660 return -QUADD_URC_UNHANDLED_INSTRUCTION;
663 switch (encoding & 0x70) {
664 case DW_EH_PE_absptr:
665 pr_debug("DW_EH_PE_absptr\n");
670 pr_debug("DW_EH_PE_pcrel, pcrel_base: %p, stmp: %ld\n",
672 res = (unsigned long)pcrel_base + stmp;
675 case DW_EH_PE_textrel:
676 pr_warn_once("warning: DW_EH_PE_textrel\n");
677 return -QUADD_URC_UNHANDLED_INSTRUCTION;
679 case DW_EH_PE_datarel:
680 pr_warn_once("warning: DW_EH_PE_datarel\n");
681 return -QUADD_URC_UNHANDLED_INSTRUCTION;
683 case DW_EH_PE_funcrel:
684 pr_warn_once("warning: DW_EH_PE_funcrel\n");
685 return -QUADD_URC_UNHANDLED_INSTRUCTION;
688 pr_warn_once("%s: warning: encoding: %#x\n",
689 __func__, encoding & 0x70);
690 return -QUADD_URC_UNHANDLED_INSTRUCTION;
694 if (encoding & DW_EH_PE_indirect) {
695 pr_debug("DW_EH_PE_indirect\n");
697 if (dw_word_size == 4) {
698 res = read_mmap_data_u32(ri, (u32 *)res,
700 } else if (dw_word_size == 8) {
701 res = read_mmap_data_u64(ri, (u64 *)res,
704 pr_err_once("error: wrong dwarf size\n");
705 return -QUADD_URC_UNHANDLED_INSTRUCTION;
708 /* we ignore links to unloaded sections */
720 dwarf_cfa_exec_insns(struct ex_region_info *ri,
721 unsigned char *insn_start,
722 unsigned char *insn_end,
724 struct stackframe *sf,
728 unsigned char *c_insn;
729 unsigned int expr_len, delta;
730 unsigned long utmp, reg;
731 long offset, stmp, err = 0;
732 struct regs_state *rs, *rs_initial, *rs_stack;
733 struct dwarf_cpu_context *cpu_ctx = this_cpu_ptr(ctx.cpu_ctx);
736 rs_initial = &sf->rs_initial;
738 rs_stack = cpu_ctx->rs_stack;
743 while (c_insn < insn_end && sf->pc <= pc) {
744 insn = read_mmap_data_u8(ri, c_insn++,
745 QUADD_SEC_TYPE_EH_FRAME, &err);
749 switch (dw_cfa_opcode(insn)) {
750 case DW_CFA_advance_loc:
751 delta = dw_cfa_operand(insn);
752 delta *= cie->code_align_factor;
754 pr_debug("DW_CFA_advance_loc: pc: %#lx --> %#lx (delta: %#x)\n",
755 sf->pc - delta, sf->pc, delta);
759 reg = dw_cfa_operand(insn);
760 c_insn += dwarf_read_uleb128(ri, c_insn, &utmp,
761 QUADD_SEC_TYPE_EH_FRAME,
766 offset = utmp * cie->data_align_factor;
767 set_rule_offset(rs, reg, DW_WHERE_CFAREL, offset);
768 pr_debug("DW_CFA_offset: reg: r%lu, offset(addr): %#lx (%ld)\n",
769 reg, offset, offset);
773 reg = dw_cfa_operand(insn);
775 if (!validate_regnum(rs, reg))
778 rs->reg[reg] = rs_initial->reg[reg];
779 pr_debug("DW_CFA_restore: reg: r%lu\n", reg);
785 pr_debug("DW_CFA_nop\n");
788 case DW_CFA_advance_loc1:
789 delta = read_mmap_data_u8(ri, c_insn++,
790 QUADD_SEC_TYPE_EH_FRAME,
795 sf->pc += delta * cie->code_align_factor;
796 pr_debug("DW_CFA_advance_loc1: pc: %#lx --> %#lx (delta: %#lx)\n",
797 sf->pc - delta * cie->code_align_factor, sf->pc,
798 delta * cie->code_align_factor);
801 case DW_CFA_advance_loc2:
802 delta = read_mmap_data_u16(ri, (u16 *)c_insn,
803 QUADD_SEC_TYPE_EH_FRAME,
809 sf->pc += delta * cie->code_align_factor;
810 pr_debug("DW_CFA_advance_loc2: pc: %#lx --> %#lx (delta: %#lx)\n",
811 sf->pc - delta * cie->code_align_factor, sf->pc,
812 delta * cie->code_align_factor);
815 case DW_CFA_advance_loc4:
816 delta = read_mmap_data_u32(ri, (u32 *)c_insn,
817 QUADD_SEC_TYPE_EH_FRAME,
823 sf->pc += delta * cie->code_align_factor;
824 pr_debug("DW_CFA_advance_loc4: pc: %#lx --> %#lx (delta: %#lx)\n",
825 sf->pc - delta * cie->code_align_factor, sf->pc,
826 delta * cie->code_align_factor);
829 case DW_CFA_offset_extended:
830 c_insn += dwarf_read_uleb128(ri, c_insn, &utmp,
831 QUADD_SEC_TYPE_EH_FRAME,
837 c_insn += dwarf_read_uleb128(ri, c_insn, &utmp,
838 QUADD_SEC_TYPE_EH_FRAME,
843 offset = utmp * cie->data_align_factor;
844 pr_debug("DW_CFA_offset_extended: reg: r%lu, offset: %#lx\n",
848 case DW_CFA_restore_extended:
849 c_insn += dwarf_read_uleb128(ri, c_insn, ®,
850 QUADD_SEC_TYPE_EH_FRAME,
855 pr_debug("DW_CFA_restore_extended: reg: r%lu\n", reg);
858 case DW_CFA_undefined:
859 c_insn += dwarf_read_uleb128(ri, c_insn, ®,
860 QUADD_SEC_TYPE_EH_FRAME,
865 set_rule(rs, reg, DW_WHERE_UNDEF, 0);
866 pr_debug("DW_CFA_undefined: reg: r%lu\n", reg);
870 c_insn += dwarf_read_uleb128(ri, c_insn, &utmp,
871 QUADD_SEC_TYPE_EH_FRAME,
876 rs->cfa_register = utmp;
877 c_insn += dwarf_read_uleb128(ri, c_insn, &utmp,
878 QUADD_SEC_TYPE_EH_FRAME,
883 rs->cfa_offset = utmp;
884 pr_debug("DW_CFA_def_cfa: cfa_register: r%u, cfa_offset: %ld (%#lx)\n",
885 rs->cfa_register, rs->cfa_offset,
889 case DW_CFA_def_cfa_register:
890 c_insn += dwarf_read_uleb128(ri, c_insn, &utmp,
891 QUADD_SEC_TYPE_EH_FRAME,
896 rs->cfa_register = utmp;
897 pr_debug("DW_CFA_def_cfa_register: cfa_register: r%u\n",
901 case DW_CFA_def_cfa_offset:
902 c_insn += dwarf_read_uleb128(ri, c_insn, &utmp,
903 QUADD_SEC_TYPE_EH_FRAME,
908 rs->cfa_offset = utmp;
909 pr_debug("DW_CFA_def_cfa_offset: cfa_offset: %ld (%#lx)\n",
910 rs->cfa_offset, rs->cfa_offset);
913 case DW_CFA_def_cfa_expression:
914 c_insn += dwarf_read_uleb128(ri, c_insn, &utmp,
915 QUADD_SEC_TYPE_EH_FRAME,
922 rs->cfa_expr = c_insn;
923 rs->cfa_expr_len = expr_len;
924 rs->cfa_how = DW_CFA_EXP;
927 pr_debug("DW_CFA_def_cfa_expression: expr_len: %#x\n",
931 case DW_CFA_expression:
932 c_insn += dwarf_read_uleb128(ri, c_insn, ®,
933 QUADD_SEC_TYPE_EH_FRAME,
938 set_rule_exp(rs, reg, DW_WHERE_EXPR, c_insn);
940 c_insn += dwarf_read_uleb128(ri, c_insn, &utmp,
941 QUADD_SEC_TYPE_EH_FRAME,
948 pr_debug("DW_CFA_expression: reg: r%lu\n", reg);
951 case DW_CFA_offset_extended_sf:
952 c_insn += dwarf_read_uleb128(ri, c_insn, ®,
953 QUADD_SEC_TYPE_EH_FRAME,
958 c_insn += dwarf_read_sleb128(ri, c_insn, &stmp,
959 QUADD_SEC_TYPE_EH_FRAME,
964 offset = stmp * cie->data_align_factor;
965 set_rule_offset(rs, reg, DW_WHERE_CFAREL, offset);
966 pr_debug("DW_CFA_offset_extended_sf: reg: r%lu, offset: %#lx\n",
970 case DW_CFA_val_offset:
971 c_insn += dwarf_read_uleb128(ri, c_insn, ®,
972 QUADD_SEC_TYPE_EH_FRAME,
977 c_insn += dwarf_read_uleb128(ri, c_insn, &utmp,
978 QUADD_SEC_TYPE_EH_FRAME,
983 offset = utmp * cie->data_align_factor;
984 set_rule_offset(rs, reg, DW_WHERE_VAL_OFFSET, offset);
985 pr_debug("DW_CFA_val_offset: reg: r%lu, offset(addr): %#lx\n",
989 case DW_CFA_val_offset_sf:
990 c_insn += dwarf_read_uleb128(ri, c_insn, ®,
991 QUADD_SEC_TYPE_EH_FRAME, &err);
995 c_insn += dwarf_read_sleb128(ri, c_insn, &stmp,
996 QUADD_SEC_TYPE_EH_FRAME,
1001 offset = stmp * cie->data_align_factor;
1002 set_rule_offset(rs, reg, DW_WHERE_VAL_OFFSET, offset);
1003 pr_debug("DW_CFA_val_offset_sf: reg: r%lu, offset(addr): %#lx\n",
1007 case DW_CFA_GNU_args_size:
1008 c_insn += dwarf_read_uleb128(ri, c_insn, &utmp,
1009 QUADD_SEC_TYPE_EH_FRAME,
1014 pr_debug("DW_CFA_GNU_args_size: offset: %#lx\n", utmp);
1017 case DW_CFA_GNU_negative_offset_extended:
1018 c_insn += dwarf_read_uleb128(ri, c_insn, ®,
1019 QUADD_SEC_TYPE_EH_FRAME,
1024 c_insn += dwarf_read_uleb128(ri, c_insn, &utmp,
1025 QUADD_SEC_TYPE_EH_FRAME,
1030 offset = utmp * cie->data_align_factor;
1031 set_rule_offset(rs, reg, DW_WHERE_CFAREL, -offset);
1032 pr_debug("DW_CFA_GNU_negative_offset_extended: reg: r%lu, offset: %#lx\n",
1036 case DW_CFA_remember_state:
1037 pr_debug("DW_CFA_remember_state\n");
1039 if (cpu_ctx->depth >= DW_MAX_RS_STACK_DEPTH) {
1040 pr_warn_once("error: rs stack was overflowed\n");
1044 rs_stack[cpu_ctx->depth++] = *rs;
1047 case DW_CFA_restore_state:
1048 pr_debug("DW_CFA_restore_state\n");
1050 if (cpu_ctx->depth == 0) {
1051 pr_warn_once("error: rs stack error\n");
1055 *rs = rs_stack[--cpu_ctx->depth];
1058 case DW_CFA_def_cfa_sf:
1059 c_insn += dwarf_read_uleb128(ri, c_insn, &utmp,
1060 QUADD_SEC_TYPE_EH_FRAME,
1065 c_insn += dwarf_read_sleb128(ri, c_insn, &stmp,
1066 QUADD_SEC_TYPE_EH_FRAME,
1071 rs->cfa_register = utmp;
1072 rs->cfa_offset = stmp * cie->data_align_factor;
1073 rs->cfa_how = DW_CFA_REG_OFFSET;
1075 pr_debug("DW_CFA_def_cfa_sf: cfa_register: r%u, cfa_offset: %ld (%#lx)\n",
1076 rs->cfa_register, rs->cfa_offset,
1080 case DW_CFA_def_cfa_offset_sf:
1081 c_insn += dwarf_read_sleb128(ri, c_insn, &stmp,
1082 QUADD_SEC_TYPE_EH_FRAME,
1087 rs->cfa_offset = stmp * cie->data_align_factor;
1088 pr_debug("DW_CFA_def_cfa_offset_sf: cfa_offset: %ld (%#lx)\n",
1089 rs->cfa_offset, rs->cfa_offset);
1092 case DW_CFA_same_value:
1093 c_insn += dwarf_read_uleb128(ri, c_insn, ®,
1094 QUADD_SEC_TYPE_EH_FRAME,
1099 set_rule(rs, reg, DW_WHERE_SAME, 0);
1100 pr_debug("DW_CFA_same_value: reg: r%lu\n", reg);
1103 case DW_CFA_val_expression:
1104 c_insn += dwarf_read_uleb128(ri, c_insn, ®,
1105 QUADD_SEC_TYPE_EH_FRAME,
1110 set_rule_exp(rs, reg, DW_WHERE_VAL_EXPR, c_insn);
1111 c_insn += dwarf_read_uleb128(ri, c_insn, &utmp,
1112 QUADD_SEC_TYPE_EH_FRAME,
1118 pr_debug("DW_CFA_val_expression: reg: r%lu\n", reg);
1122 pr_warn_once("warning: unhandled dwarf instr %#x\n",
1132 decode_cie_entry(struct ex_region_info *ri,
1134 unsigned char *entry,
1139 unsigned char *p, *end, *aug;
1140 unsigned int cie_version, id, len, max_len;
1143 end = entry + length;
1147 id = read_mmap_data_u32(ri, (u32 *)p, QUADD_SEC_TYPE_EH_FRAME, &err);
1154 return -QUADD_URC_TBL_IS_CORRUPT;
1156 cie_version = read_mmap_data_u8(ri, p++, QUADD_SEC_TYPE_EH_FRAME, &err);
1160 if (cie_version != 1 && cie_version != 3) {
1161 pr_err_once("error: wrong cie_version: %u\n", cie_version);
1162 return -QUADD_URC_TBL_IS_CORRUPT;
1166 return -QUADD_URC_TBL_IS_CORRUPT;
1168 max_len = end - p - 1;
1169 len = strnlen((const char *)p, max_len);
1171 return -QUADD_URC_TBL_IS_CORRUPT;
1173 cie->aug_string = p;
1176 pr_debug("aug_string: %s\n", cie->aug_string);
1178 p += dwarf_read_uleb128(ri, p, &cie->code_align_factor,
1179 QUADD_SEC_TYPE_EH_FRAME, &err);
1183 p += dwarf_read_sleb128(ri, p, &cie->data_align_factor,
1184 QUADD_SEC_TYPE_EH_FRAME, &err);
1188 if (cie_version == 1) {
1189 cie->retaddr_reg = read_mmap_data_u8(ri, p++,
1190 QUADD_SEC_TYPE_EH_FRAME,
1195 p += dwarf_read_uleb128(ri, p, &utmp,
1196 QUADD_SEC_TYPE_EH_FRAME, &err);
1200 cie->retaddr_reg = utmp;
1203 pr_debug("address column: %u\n", cie->retaddr_reg);
1205 aug = cie->aug_string;
1208 cie->initial_insn = NULL;
1209 cie->initial_insn_len = 0;
1212 p += dwarf_read_uleb128(ri, p, &cie->aug_size,
1213 QUADD_SEC_TYPE_EH_FRAME, &err);
1217 cie->initial_insn = p + cie->aug_size;
1222 pr_warn_once("warning: !aug_z\n");
1225 cie->fde_encoding = 0;
1226 cie->lsda_encoding = DW_EH_PE_omit;
1227 cie->personality = NULL;
1229 while (*aug != '\0') {
1231 return -QUADD_URC_TBL_IS_CORRUPT;
1234 cie->lsda_encoding =
1235 read_mmap_data_u8(ri, p++,
1236 QUADD_SEC_TYPE_EH_FRAME,
1242 } else if (*aug == 'R') {
1244 read_mmap_data_u8(ri, p++,
1245 QUADD_SEC_TYPE_EH_FRAME,
1251 pr_debug("fde_encoding: %#x\n", cie->fde_encoding);
1252 } else if (*aug == 'P') {
1255 unsigned char handler_encoding;
1256 unsigned long personality;
1258 handler_encoding = *p++;
1260 pcrel_base = (void *)
1261 mmap_addr_to_ex_addr((unsigned long)p,
1263 QUADD_SEC_TYPE_EH_FRAME);
1265 cnt = dwarf_read_encoded_value(ri, p, pcrel_base,
1268 QUADD_SEC_TYPE_EH_FRAME);
1270 pr_err_once("%s: error: personality routine\n",
1276 pr_debug("personality: %#lx\n", personality);
1277 cie->personality = (void *)personality;
1279 } else if (*aug == 'S') {
1281 pr_debug("%s: aug: S\n", __func__);
1283 pr_warn_once("%s: warning: unknown aug\n", __func__);
1284 return -QUADD_URC_UNHANDLED_INSTRUCTION;
1289 pr_err_once("%s: error: cie\n", __func__);
1290 return -QUADD_URC_TBL_IS_CORRUPT;
1296 if (!cie->initial_insn)
1297 cie->initial_insn = p;
1299 cie->initial_insn_len = end - cie->initial_insn;
1305 decode_fde_entry(struct ex_region_info *ri,
1307 unsigned char *entry,
1313 unsigned char *p, *end, *pcrel_base;
1314 struct dw_cie *cie = fde->cie;
1317 end = entry + length;
1322 pcrel_base = (unsigned char *)
1323 mmap_addr_to_ex_addr((unsigned long)p, ri,
1324 QUADD_SEC_TYPE_EH_FRAME);
1326 count = dwarf_read_encoded_value(ri, p, pcrel_base,
1327 &fde->initial_location,
1329 QUADD_SEC_TYPE_EH_FRAME);
1335 fde->address_range = read_mmap_data_u32(ri, (u32 *)p,
1336 QUADD_SEC_TYPE_EH_FRAME, &err);
1342 pr_debug("init location: %#lx\n", fde->initial_location);
1343 pr_debug("address_range: %#lx\n", fde->address_range);
1346 p += dwarf_read_uleb128(ri, p, &utmp,
1347 QUADD_SEC_TYPE_EH_FRAME, &err);
1355 pr_err_once("%s: error: incorrect fde\n", __func__);
1356 return -QUADD_URC_TBL_IS_CORRUPT;
1359 fde->insn_length = end - p;
1361 if (fde->insn_length > 0)
1362 fde->instructions = p;
1364 fde->instructions = NULL;
1369 static const struct dw_fde_table *
1370 dwarf_bst_find_idx(unsigned long data_base,
1371 struct dw_fde_table *fde_table,
1372 unsigned long length,
1375 unsigned long initial_loc;
1376 struct dw_fde_table *start, *stop;
1377 struct dw_fde_table *mid = NULL;
1379 if (unlikely(!length))
1383 stop = start + length - 1;
1385 initial_loc = dw_bst_get_initial_loc(start, data_base);
1386 if (addr < initial_loc)
1389 initial_loc = dw_bst_get_initial_loc(stop, data_base);
1390 if (addr >= initial_loc)
1393 while (start < stop - 1) {
1394 mid = start + ((stop - start) >> 1);
1396 initial_loc = dw_bst_get_initial_loc(mid, data_base);
1398 if (addr < initial_loc)
1407 static struct dw_fde_table *
1408 dwarf_get_bs_table(struct ex_region_info *ri,
1410 unsigned long length,
1411 unsigned long data_base,
1412 unsigned long *nr_entries)
1415 unsigned char *p, *end;
1416 struct dw_fde_table *bst;
1417 unsigned long fde_count, frame_ptr;
1418 struct dw_eh_frame_hdr *hdr = data;
1420 if (length <= sizeof(*hdr))
1423 end = data + length;
1425 pr_debug("hdr: %p\n", hdr);
1427 if (hdr->version != 1) {
1428 pr_warn_once("warning: unknown eh hdr format\n");
1431 p = (unsigned char *)(hdr + 1);
1433 if (hdr->eh_frame_ptr_enc != DW_EH_PE_omit) {
1434 count = dwarf_read_encoded_value(ri, p, (void *)data_base,
1436 hdr->eh_frame_ptr_enc,
1437 QUADD_SEC_TYPE_EH_FRAME_HDR);
1444 if (hdr->fde_count_enc == DW_EH_PE_omit)
1447 count = dwarf_read_encoded_value(ri, p, (void *)data_base,
1448 &fde_count, hdr->fde_count_enc,
1449 QUADD_SEC_TYPE_EH_FRAME_HDR);
1458 if (fde_count * sizeof(*bst) != end - p)
1461 if (hdr->table_enc != (DW_EH_PE_datarel | DW_EH_PE_sdata4)) {
1462 pr_warn_once("warning: unknown eh hdr format\n");
1466 bst = (struct dw_fde_table *)p;
1467 *nr_entries = fde_count;
1473 dwarf_decode_fde_cie(struct ex_region_info *ri,
1474 unsigned char *fde_p,
1480 unsigned char *cie_p;
1481 unsigned long cie_pointer, length;
1482 unsigned char *frame_start;
1483 unsigned long frame_len, addr;
1484 struct extab_info *ti;
1486 ti = &ri->ex_sec[QUADD_SEC_TYPE_EH_FRAME];
1490 frame_start = (unsigned char *)
1491 ex_addr_to_mmap_addr(addr, ri, QUADD_SEC_TYPE_EH_FRAME);
1493 frame_len = ti->length;
1495 pr_debug("eh frame: %p - %p\n",
1496 frame_start, frame_start + frame_len);
1500 length = read_mmap_data_u32(ri, p++, QUADD_SEC_TYPE_EH_FRAME, &err);
1504 if (length == 0xffffffff) {
1505 pr_warn_once("warning: 64-bit .eh_frame is not supported\n");
1506 return -QUADD_URC_UNHANDLED_INSTRUCTION;
1509 fde->offset = fde_p - frame_start;
1510 fde->length = length + sizeof(u32);
1512 pr_debug("FDE: fde_p: %p, offset: %#lx, len: %#lx\n",
1513 fde_p, fde->offset, fde->length);
1515 cie_pointer = read_mmap_data_u32(ri, p, QUADD_SEC_TYPE_EH_FRAME, &err);
1519 fde->cie_pointer = cie_pointer;
1520 cie_p = (unsigned char *)p - cie_pointer;
1522 length = read_mmap_data_u32(ri, (u32 *)cie_p,
1523 QUADD_SEC_TYPE_EH_FRAME, &err);
1527 if (length == 0xffffffff) {
1528 pr_warn_once("warning: 64-bit .eh_frame is not supported\n");
1529 return -QUADD_URC_UNHANDLED_INSTRUCTION;
1532 cie->offset = cie_p - frame_start;
1533 cie->length = length + sizeof(u32);
1535 pr_debug("CIE: cie_p: %p, offset: %#lx, len: %#lx\n",
1536 cie_p, cie->offset, cie->length);
1538 err = decode_cie_entry(ri, cie, cie_p, cie->length);
1544 err = decode_fde_entry(ri, fde, fde_p, fde->length);
1552 dwarf_find_fde(struct ex_region_info *ri,
1554 unsigned long length,
1558 const struct dw_fde_table *fi;
1559 unsigned long fde_count = 0, data_base;
1560 unsigned long fde_addr, init_loc;
1561 struct dw_fde_table *bst;
1562 struct extab_info *ti;
1564 ti = &ri->ex_sec[QUADD_SEC_TYPE_EH_FRAME_HDR];
1565 data_base = ti->addr;
1567 bst = dwarf_get_bs_table(ri, data, length, data_base, &fde_count);
1568 if (!bst || fde_count == 0) {
1569 pr_warn_once("warning: bs_table\n");
1573 fi = &bst[fde_count - 1];
1574 init_loc = dw_bst_get_initial_loc(fi, data_base);
1576 if (pc >= init_loc) {
1577 unsigned long start, end;
1579 fde_addr = dw_bst_get_fde_addr(fi, data_base);
1580 fde_addr = ex_addr_to_mmap_addr(fde_addr, ri,
1581 QUADD_SEC_TYPE_EH_FRAME);
1584 return (void *)fde_addr;
1586 if (ri->tf_end > 0) {
1587 start = ri->tf_start;
1593 err = dwarf_decode_fde_cie(ri, (void *)fde_addr,
1598 start = fde.initial_location;
1599 end = start + fde.address_range;
1601 quadd_unwind_set_tail_info(ri->vm_start, start, end);
1604 return (pc >= start && pc < end) ?
1605 (void *)fde_addr : NULL;
1608 fi = dwarf_bst_find_idx(data_base, bst, fde_count, pc);
1612 fde_addr = dw_bst_get_fde_addr(fi, data_base);
1613 fde_addr = ex_addr_to_mmap_addr(fde_addr, ri, QUADD_SEC_TYPE_EH_FRAME);
1615 return (void *)fde_addr;
1619 dwarf_decode(struct ex_region_info *ri,
1625 unsigned char *fde_p;
1626 unsigned char *hdr_start;
1627 unsigned long hdr_len, addr;
1628 struct extab_info *ti;
1630 ti = &ri->ex_sec[QUADD_SEC_TYPE_EH_FRAME_HDR];
1634 hdr_start = (unsigned char *)
1635 ex_addr_to_mmap_addr(addr, ri, QUADD_SEC_TYPE_EH_FRAME_HDR);
1637 hdr_len = ti->length;
1639 pr_debug("eh frame hdr: %p - %p\n",
1640 hdr_start, hdr_start + hdr_len);
1642 fde_p = dwarf_find_fde(ri, hdr_start, hdr_len, pc);
1644 return -QUADD_URC_IDX_NOT_FOUND;
1646 err = dwarf_decode_fde_cie(ri, fde_p, cie, fde);
1650 if (pc < fde->initial_location ||
1651 pc >= fde->initial_location + fde->address_range) {
1652 pr_debug("pc is not in range: %#lx - %#lx\n",
1653 fde->initial_location,
1654 fde->initial_location + fde->address_range);
1655 return -QUADD_URC_IDX_NOT_FOUND;
1661 static long def_cfa(struct stackframe *sf, struct regs_state *rs)
1663 int reg = rs->cfa_register;
1666 if (reg >= QUADD_NUM_REGS)
1667 return -QUADD_URC_TBL_IS_CORRUPT;
1669 pr_debug("r%d --> cfa (%#lx)\n", reg, sf->cfa);
1670 sf->cfa = sf->vregs[reg];
1673 sf->cfa += rs->cfa_offset;
1674 pr_debug("cfa += %#lx (%#lx)\n", rs->cfa_offset, sf->cfa);
1680 unwind_frame(struct ex_region_info *ri,
1681 struct stackframe *sf,
1682 struct vm_area_struct *vma_sp,
1683 unsigned int *unw_type)
1687 unsigned char *insn_end;
1688 unsigned long addr, return_addr, val, user_reg_size;
1691 unsigned long pc = sf->pc;
1692 struct regs_state *rs, *rs_initial;
1693 int mode = sf->mode;
1695 err = dwarf_decode(ri, &cie, &fde, pc);
1699 sf->pc = fde.initial_location;
1702 rs_initial = &sf->rs_initial;
1704 rs->cfa_register = -1;
1705 rs_initial->cfa_register = -1;
1707 rs->cfa_register = 0;
1709 set_rule(rs, regnum_lr(mode), DW_WHERE_UNDEF, 0);
1711 if (cie.initial_insn) {
1712 insn_end = cie.initial_insn + cie.initial_insn_len;
1713 err = dwarf_cfa_exec_insns(ri, cie.initial_insn,
1714 insn_end, &cie, sf, pc);
1719 memcpy(rs_initial, rs, sizeof(*rs));
1721 if (fde.instructions) {
1722 insn_end = fde.instructions + fde.insn_length;
1723 err = dwarf_cfa_exec_insns(ri, fde.instructions,
1724 insn_end, fde.cie, sf, pc);
1729 pr_debug("mode: %s\n", (mode == DW_MODE_ARM32) ? "arm32" : "arm64");
1730 pr_debug("initial cfa: %#lx\n", sf->cfa);
1732 user_reg_size = get_user_reg_size(mode);
1734 err = def_cfa(sf, rs);
1738 pr_debug("pc: %#lx, lr: %#lx\n", sf->pc, sf->vregs[regnum_lr(mode)]);
1740 pr_debug("sp: %#lx, fp: %#lx, fp_thumb: %#lx\n",
1741 sf->vregs[regnum_sp(mode)],
1742 sf->vregs[regnum_fp(mode)],
1743 sf->vregs[ARM32_FP_THUMB]);
1745 pr_debug("lr rule: %#lx/%ld (where: %u)\n",
1746 rs->reg[regnum_lr(mode)].loc.reg,
1747 rs->reg[regnum_lr(mode)].loc.offset,
1748 rs->reg[regnum_lr(mode)].where);
1750 pr_debug("fp rule: %#lx/%ld (where: %u)\n",
1751 rs->reg[regnum_fp(mode)].loc.reg,
1752 rs->reg[regnum_fp(mode)].loc.offset,
1753 rs->reg[regnum_fp(mode)].where);
1755 pr_debug("fp_thumb rule: %#lx/%ld (where: %u)\n",
1756 rs->reg[ARM32_FP_THUMB].loc.reg,
1757 rs->reg[ARM32_FP_THUMB].loc.offset,
1758 rs->reg[ARM32_FP_THUMB].where);
1760 pr_debug("cfa_offset: %ld (%#lx)\n",
1761 rs->cfa_offset, rs->cfa_offset);
1762 pr_debug("cfa_register: %u\n", rs->cfa_register);
1763 pr_debug("new cfa: %#lx\n", sf->cfa);
1765 for (i = 0; i < QUADD_NUM_REGS; i++) {
1766 switch (rs->reg[i].where) {
1767 case DW_WHERE_UNDEF:
1773 case DW_WHERE_CFAREL:
1774 addr = sf->cfa + rs->reg[i].loc.offset;
1776 if (!validate_stack_addr(addr, vma_sp, user_reg_size))
1777 return -QUADD_URC_SP_INCORRECT;
1779 if (mode == DW_MODE_ARM32)
1780 err = read_user_data((u32 __user *)addr, val);
1782 err = read_user_data((unsigned long __user *)
1789 pr_debug("[r%d] DW_WHERE_CFAREL: new val: %#lx\n",
1795 pr_err_once("[r%d] error: unsupported rule\n",
1801 return_addr = sf->vregs[regnum_lr(mode)];
1802 pr_debug("return_addr: %#lx\n", return_addr);
1804 if (!validate_pc_addr(return_addr, user_reg_size))
1805 return -QUADD_URC_PC_INCORRECT;
1807 sf->pc = return_addr;
1808 sf->vregs[regnum_sp(mode)] = sf->cfa;
1814 unwind_backtrace(struct quadd_callchain *cc,
1815 struct ex_region_info *ri,
1816 struct stackframe *sf,
1817 struct vm_area_struct *vma_sp,
1818 struct task_struct *task)
1820 unsigned long user_reg_size;
1821 struct ex_region_info ri_new;
1822 unsigned int unw_type = QUADD_UNW_TYPE_UT;
1823 int mode = sf->mode;
1825 cc->unw_rc = QUADD_URC_FAILURE;
1826 user_reg_size = get_user_reg_size(mode);
1831 struct vm_area_struct *vma_pc;
1832 unsigned long addr, where = sf->pc;
1833 struct mm_struct *mm = task->mm;
1834 struct extab_info *ti;
1839 sp = sf->vregs[regnum_sp(mode)];
1841 if (!validate_stack_addr(sp, vma_sp, user_reg_size)) {
1842 cc->unw_rc = -QUADD_URC_SP_INCORRECT;
1846 vma_pc = find_vma(mm, sf->pc);
1850 ti = &ri->ex_sec[QUADD_SEC_TYPE_EH_FRAME_HDR];
1853 if (!is_vma_addr(addr, vma_pc, user_reg_size)) {
1854 err = quadd_get_extabs_ehframe(vma_pc->vm_start,
1857 cc->unw_rc = QUADD_URC_TBL_NOT_EXIST;
1864 err = unwind_frame(ri, sf, vma_sp, &unw_type);
1870 pr_debug("function at [<%08lx>] from [<%08lx>]\n",
1873 cc->curr_sp = sf->vregs[regnum_sp(mode)];
1874 cc->curr_fp = sf->vregs[regnum_fp(mode)];
1875 cc->curr_pc = sf->pc;
1877 nr_added = quadd_callchain_store(cc, sf->pc, unw_type);
1884 quadd_is_ex_entry_exist_dwarf(struct pt_regs *regs,
1886 struct task_struct *task)
1889 unsigned char *fde_p;
1890 struct ex_region_info ri;
1891 unsigned char *hdr_start;
1892 unsigned long hdr_len, a;
1893 struct vm_area_struct *vma;
1894 struct mm_struct *mm = task->mm;
1895 struct extab_info *ti;
1900 vma = find_vma(mm, addr);
1904 err = quadd_get_extabs_ehframe(vma->vm_start, &ri);
1908 ti = &ri.ex_sec[QUADD_SEC_TYPE_EH_FRAME_HDR];
1912 hdr_start = (unsigned char *)
1913 ex_addr_to_mmap_addr(a, &ri, QUADD_SEC_TYPE_EH_FRAME_HDR);
1915 hdr_len = ti->length;
1917 fde_p = dwarf_find_fde(&ri, hdr_start, hdr_len, addr);
1925 quadd_get_user_cc_dwarf(struct pt_regs *regs,
1926 struct quadd_callchain *cc,
1927 struct task_struct *task)
1930 int i, mode, nr_prev = cc->nr;
1931 unsigned long ip, lr, sp, fp, fp_thumb;
1932 struct vm_area_struct *vma, *vma_sp;
1933 struct mm_struct *mm = task->mm;
1934 struct ex_region_info ri;
1935 struct stackframe sf;
1936 struct dwarf_cpu_context *cpu_ctx = this_cpu_ptr(ctx.cpu_ctx);
1941 if (cc->unw_rc == QUADD_URC_LEVEL_TOO_DEEP)
1944 cc->unw_rc = QUADD_URC_FAILURE;
1953 ip = instruction_pointer(regs);
1954 lr = quadd_user_link_register(regs);
1955 sp = quadd_user_stack_pointer(regs);
1958 if (compat_user_mode(regs)) {
1959 fp = regs->compat_usr(11);
1960 fp_thumb = regs->compat_usr(7);
1962 fp = regs->regs[29];
1967 fp_thumb = regs->ARM_r7;
1972 if (compat_user_mode(regs))
1973 mode = DW_MODE_ARM32;
1975 mode = DW_MODE_ARM64;
1977 mode = DW_MODE_ARM32;
1981 pr_debug("%s: pc: %#lx, lr: %#lx\n", __func__, ip, lr);
1982 pr_debug("%s: sp: %#lx, fp: %#lx, fp_thumb: %#lx\n",
1983 __func__, sp, fp, fp_thumb);
1985 sf.vregs[regnum_lr(mode)] = lr;
1988 sf.vregs[regnum_sp(mode)] = sp;
1989 sf.vregs[regnum_fp(mode)] = fp;
1991 sf.vregs[ARM32_FP_THUMB] = fp_thumb;
1993 cpu_ctx->dw_word_size = (mode == DW_MODE_ARM32) ?
1994 sizeof(u32) : sizeof(u64);
1999 for (i = 0; i < QUADD_NUM_REGS; i++)
2000 set_rule(&sf.rs, i, DW_WHERE_UNDEF, 0);
2002 vma = find_vma(mm, ip);
2006 vma_sp = find_vma(mm, sp);
2010 err = quadd_get_extabs_ehframe(vma->vm_start, &ri);
2012 cc->unw_rc = QUADD_URC_TBL_NOT_EXIST;
2016 unwind_backtrace(cc, &ri, &sf, vma_sp, task);
2018 pr_debug("%s: mode: %s, cc->nr: %d --> %d\n", __func__,
2019 (mode == DW_MODE_ARM32) ? "arm32" : "arm64",
2025 int quadd_dwarf_unwind_start(void)
2027 if (!atomic_cmpxchg(&ctx.started, 0, 1)) {
2028 ctx.cpu_ctx = alloc_percpu(struct dwarf_cpu_context);
2036 void quadd_dwarf_unwind_stop(void)
2038 if (atomic_cmpxchg(&ctx.started, 1, 0))
2039 free_percpu(ctx.cpu_ctx);
2042 int quadd_dwarf_unwind_init(void)
2044 atomic_set(&ctx.started, 0);