/*
 * drivers/misc/tegra-profiler/exh_tables.c
 *
 * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/err.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/workqueue.h>

#include <linux/tegra_profiler.h>

#include "eh_unwind.h"
#include "backtrace.h"
#include "dwarf_unwind.h"
#include "disassembler.h"
#define QUADD_EXTABS_SIZE	0x100

#define GET_NR_PAGES(a, l) \
	((PAGE_ALIGN((a) + (l)) - ((a) & PAGE_MASK)) / PAGE_SIZE)
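/*
 * GET_NR_PAGES() counts how many pages the byte range [a, a + l)
 * touches: the end of the range is rounded up and the start rounded
 * down to a page boundary before dividing. Illustrative example,
 * assuming 4 KiB pages: an 8-byte access at 0x1ffc straddles a page
 * boundary, so GET_NR_PAGES(0x1ffc, 8) == 2.
 */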
struct regions_data {
	struct ex_region_info *entries;

	unsigned long curr_nr;
	unsigned long size;

	struct rcu_head rcu;
};

struct quadd_unwind_ctx {
	struct regions_data *rd;

	pid_t pid;
	unsigned long ex_tables_size;
	spinlock_t lock;
};

struct stackframe {
	unsigned long fp_thumb;
	unsigned long fp_arm;

	unsigned long sp;
	unsigned long lr;
	unsigned long pc;
};

/* register numbers: indices into the virtual register set */
enum regs {
	FP_THUMB = 7,
	FP_ARM = 11,
	SP = 13,
	LR = 14,
	PC = 15,
};
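/*
 * State for interpreting one EHABI unwind-instruction sequence: vrs[]
 * mirrors the 16 ARM core registers as a virtual register set, insn
 * points at the 32-bit word of the unwind table currently being
 * decoded, entries counts the words that remain, and byte selects the
 * next byte within the current word (bytes are consumed from the most
 * significant byte down; see unwind_get_byte() below).
 */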
struct unwind_ctrl_block {
	u32 vrs[16];		/* virtual register set */
	const u32 *insn;	/* pointer to the current instr word */
	int entries;		/* number of entries left */
	int byte;		/* current byte in the instr word */
};

struct pin_pages_work {
	struct work_struct work;
	unsigned long vm_start;
};

static struct quadd_unwind_ctx ctx;
static inline int
validate_mmap_addr(struct quadd_mmap_area *mmap,
		   unsigned long addr, unsigned long nbytes)
{
	struct vm_area_struct *vma = mmap->mmap_vma;
	unsigned long size = vma->vm_end - vma->vm_start;
	unsigned long data = (unsigned long)mmap->data;

	if (addr & 0x03) {
		pr_err_once("%s: error: unaligned address: %#lx, data: %#lx-%#lx, vma: %#lx-%#lx\n",
			    __func__, addr, data, data + size,
			    vma->vm_start, vma->vm_end);
		return 0;
	}

	if (addr < data || addr >= data + (size - nbytes)) {
		pr_err_once("%s: error: addr: %#lx, data: %#lx-%#lx, vma: %#lx-%#lx\n",
			    __func__, addr, data, data + size,
			    vma->vm_start, vma->vm_end);
		return 0;
	}

	return 1;
}
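/*
 * read_user_data() dereferences a user-space address with page faults
 * disabled: samples are taken from a context that must not sleep, so
 * a fault aborts the unwind with -QUADD_URC_EACCESS instead of
 * blocking on a page-in.
 */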
#define read_user_data(addr, retval)				\
({								\
	long ret;						\
	pagefault_disable();					\
	ret = __get_user(retval, addr);				\
	pagefault_enable();					\
	if (ret) {						\
		pr_debug("%s: failed for address: %p\n",	\
			 __func__, addr);			\
		ret = -QUADD_URC_EACCESS;			\
	}							\
	ret;							\
})
static inline long
read_mmap_data(struct quadd_mmap_area *mmap, const u32 *addr, u32 *retval)
{
	if (!validate_mmap_addr(mmap, (unsigned long)addr, sizeof(u32))) {
		*retval = 0;
		return -QUADD_URC_EACCESS;
	}

	*retval = *addr;
	return 0;
}
static inline unsigned long
ex_addr_to_mmap_addr(unsigned long addr,
		     struct ex_region_info *ri,
		     int sec_type)
{
	unsigned long offset;
	struct extab_info *ti;

	ti = &ri->ex_sec[sec_type];
	if (unlikely(!ti->length))
		return 0;

	offset = addr - ti->addr;
	return ti->mmap_offset + offset + (unsigned long)ri->mmap->data;
}
static inline unsigned long
mmap_addr_to_ex_addr(unsigned long addr,
		     struct ex_region_info *ri,
		     int sec_type)
{
	unsigned long offset;
	struct extab_info *ti;

	ti = &ri->ex_sec[sec_type];
	if (unlikely(!ti->length))
		return 0;

	offset = addr - ti->mmap_offset - (unsigned long)ri->mmap->data;
	return ti->addr + offset;
}
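/*
 * ARM EHABI index entries store place-relative 31-bit offsets
 * ("prel31"): bit 31 is dropped and the remaining bits are a signed
 * offset from the address of the field itself. For example, a value
 * of 0x7ffffffc at address 0x8000 sign-extends to -4 and therefore
 * denotes address 0x7ffc.
 */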
static inline u32
prel31_to_addr(const u32 *ptr)
{
	u32 value;
	s32 offset;

	if (read_user_data(ptr, value))
		return 0;

	/* sign-extend to 32 bits */
	offset = (((s32)value) << 1) >> 1;
	return (u32)(unsigned long)ptr + offset;
}
static unsigned long
mmap_prel31_to_addr(const u32 *ptr, struct ex_region_info *ri,
		    int src_type, int dst_type, int to_mmap)
{
	u32 value;
	s32 offset;
	unsigned long addr;
	unsigned long addr_res;

	if (read_mmap_data(ri->mmap, ptr, &value))
		return 0;

	/* sign-extend to 32 bits */
	offset = (((s32)value) << 1) >> 1;

	addr = mmap_addr_to_ex_addr((unsigned long)ptr, ri, src_type);
	addr_res = addr + offset;

	if (to_mmap)
		addr_res = ex_addr_to_mmap_addr(addr_res, ri, dst_type);

	return addr_res;
}
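/*
 * The region table is kept sorted by vm_start so the sampling path
 * can binary-search it. add_ex_region() returns the number of entries
 * actually inserted (0 or 1), which callers use to advance curr_nr;
 * a duplicate vm_start leaves the table unchanged.
 */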
static int
add_ex_region(struct regions_data *rd,
	      struct ex_region_info *new_entry)
{
	unsigned int i_min, i_max, mid;
	struct ex_region_info *array = rd->entries;
	unsigned long size = rd->curr_nr;

	if (!array)
		return 0;

	if (size == 0) {
		memcpy(&array[0], new_entry, sizeof(*new_entry));
		return 1;
	} else if (size == 1 && array[0].vm_start == new_entry->vm_start) {
		return 0;
	}

	i_min = 0;
	i_max = size - 1;

	if (array[0].vm_start > new_entry->vm_start) {
		memmove(array + 1, array,
			size * sizeof(*array));
		memcpy(&array[0], new_entry, sizeof(*new_entry));
		return 1;
	} else if (array[size - 1].vm_start < new_entry->vm_start) {
		memcpy(&array[size], new_entry, sizeof(*new_entry));
		return 1;
	}

	while (i_min < i_max) {
		mid = i_min + (i_max - i_min) / 2;

		if (new_entry->vm_start <= array[mid].vm_start)
			i_max = mid;
		else
			i_min = mid + 1;
	}

	if (array[i_max].vm_start == new_entry->vm_start) {
		return 0;
	} else {
		memmove(array + i_max + 1,
			array + i_max,
			(size - i_max) * sizeof(*array));
		memcpy(&array[i_max], new_entry, sizeof(*new_entry));
		return 1;
	}
}
static int
remove_ex_region(struct regions_data *rd,
		 struct ex_region_info *entry)
{
	unsigned int i_min, i_max, mid;
	struct ex_region_info *array = rd->entries;
	unsigned long size = rd->curr_nr;

	if (!array || size == 0)
		return 0;

	if (size == 1) {
		if (array[0].vm_start == entry->vm_start)
			return 1;
		else
			return 0;
	}

	if (array[0].vm_start > entry->vm_start)
		return 0;
	else if (array[size - 1].vm_start < entry->vm_start)
		return 0;

	i_min = 0;
	i_max = size - 1;

	while (i_min < i_max) {
		mid = i_min + (i_max - i_min) / 2;

		if (entry->vm_start <= array[mid].vm_start)
			i_max = mid;
		else
			i_min = mid + 1;
	}

	if (array[i_max].vm_start == entry->vm_start) {
		memmove(array + i_max,
			array + i_max + 1,
			(size - i_max - 1) * sizeof(*array));
		return 1;
	}

	return 0;
}
static struct ex_region_info *
__search_ex_region(struct ex_region_info *array,
		   unsigned long size,
		   unsigned long key)
{
	unsigned int i_min, i_max, mid;

	if (size == 0)
		return NULL;

	i_min = 0;
	i_max = size - 1;

	while (i_min < i_max) {
		mid = i_min + (i_max - i_min) / 2;

		if (key <= array[mid].vm_start)
			i_max = mid;
		else
			i_min = mid + 1;
	}

	if (array[i_max].vm_start == key)
		return &array[i_max];

	return NULL;
}
static long
search_ex_region(unsigned long key, struct ex_region_info *ri)
{
	struct regions_data *rd;
	struct ex_region_info *ri_p = NULL;

	rcu_read_lock();

	rd = rcu_dereference(ctx.rd);
	if (!rd)
		goto out;

	ri_p = __search_ex_region(rd->entries, rd->curr_nr, key);
	if (ri_p)
		memcpy(ri, ri_p, sizeof(*ri));

out:
	rcu_read_unlock();
	return ri_p ? 0 : -ENOENT;
}
static long
get_extabs_ehabi(unsigned long key, struct ex_region_info *ri)
{
	long err;
	struct extab_info *ti_exidx;

	err = search_ex_region(key, ri);
	if (err < 0)
		return err;

	ti_exidx = &ri->ex_sec[QUADD_SEC_TYPE_EXIDX];
	return ti_exidx->length ? 0 : -ENOENT;
}
long
quadd_get_dw_frames(unsigned long key, struct ex_region_info *ri)
{
	long err;
	struct extab_info *ti, *ti_hdr;

	err = search_ex_region(key, ri);
	if (err < 0)
		return err;

	ti = &ri->ex_sec[QUADD_SEC_TYPE_EH_FRAME];
	ti_hdr = &ri->ex_sec[QUADD_SEC_TYPE_EH_FRAME_HDR];

	if (ti->length && ti_hdr->length)
		return 0;

	ti = &ri->ex_sec[QUADD_SEC_TYPE_DEBUG_FRAME];
	ti_hdr = &ri->ex_sec[QUADD_SEC_TYPE_DEBUG_FRAME_HDR];

	return (ti->length && ti_hdr->length) ? 0 : -ENOENT;
}
static struct regions_data *rd_alloc(unsigned long size)
{
	struct regions_data *rd;

	rd = kzalloc(sizeof(*rd), GFP_ATOMIC);
	if (!rd)
		return NULL;

	rd->entries = kzalloc(size * sizeof(*rd->entries), GFP_ATOMIC);
	if (!rd->entries) {
		kfree(rd);
		return NULL;
	}

	rd->size = size;
	rd->curr_nr = 0;

	return rd;
}

static void rd_free(struct regions_data *rd)
{
	if (rd)
		kfree(rd->entries);

	kfree(rd);
}

static void rd_free_rcu(struct rcu_head *rh)
{
	struct regions_data *rd = container_of(rh, struct regions_data, rcu);

	rd_free(rd);
}
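/*
 * Writers never modify a published regions_data in place: under
 * ctx.lock they build a copy, splice in the change, publish it with
 * rcu_assign_pointer() and dispose of the old table via call_rcu(),
 * so lock-free readers in search_ex_region() always see a consistent
 * snapshot.
 */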
int quadd_unwind_set_extab(struct quadd_sections *extabs,
			   struct quadd_mmap_area *mmap)
{
	int i, err = 0;
	unsigned long nr_entries, nr_added, new_size;
	struct ex_region_info ri_entry;
	struct extab_info *ti;
	struct regions_data *rd, *rd_new;
	struct ex_region_info *ex_entry;

	if (mmap->type != QUADD_MMAP_TYPE_EXTABS)
		return -EIO;

	spin_lock(&ctx.lock);

	rd = rcu_dereference(ctx.rd);
	if (!rd) {
		pr_warn("%s: warning: rd\n", __func__);
		new_size = QUADD_EXTABS_SIZE;
		nr_entries = 0;
	} else {
		new_size = rd->size;
		nr_entries = rd->curr_nr;
	}

	if (nr_entries >= new_size)
		new_size += new_size >> 1;

	rd_new = rd_alloc(new_size);
	if (IS_ERR_OR_NULL(rd_new)) {
		pr_err("%s: error: rd_alloc\n", __func__);
		err = -ENOMEM;
		goto error_out;
	}

	if (rd && nr_entries)
		memcpy(rd_new->entries, rd->entries,
		       nr_entries * sizeof(*rd->entries));

	rd_new->curr_nr = nr_entries;

	ri_entry.vm_start = extabs->vm_start;
	ri_entry.vm_end = extabs->vm_end;

	ri_entry.mmap = mmap;

	for (i = 0; i < QUADD_SEC_TYPE_MAX; i++) {
		struct quadd_sec_info *si = &extabs->sec[i];

		ti = &ri_entry.ex_sec[i];

		ti->tf_start = 0;
		ti->tf_end = 0;

		ti->addr = si->addr;
		ti->length = si->length;
		ti->mmap_offset = si->mmap_offset;
	}

	nr_added = add_ex_region(rd_new, &ri_entry);
	rd_new->curr_nr += nr_added;

	ex_entry = kzalloc(sizeof(*ex_entry), GFP_ATOMIC);
	if (!ex_entry) {
		err = -ENOMEM;
		goto error_free;
	}

	memcpy(ex_entry, &ri_entry, sizeof(*ex_entry));

	INIT_LIST_HEAD(&ex_entry->list);
	list_add_tail(&ex_entry->list, &mmap->ex_entries);

	rcu_assign_pointer(ctx.rd, rd_new);

	if (rd)
		call_rcu(&rd->rcu, rd_free_rcu);

	spin_unlock(&ctx.lock);

	return 0;

error_free:
	rd_free(rd_new);
error_out:
	spin_unlock(&ctx.lock);
	return err;
}
static int
quadd_unwind_set_tail_info(unsigned long vm_start,
			   int secid,
			   unsigned long tf_start,
			   unsigned long tf_end)
{
	struct ex_region_info *ri;
	unsigned long nr_entries, size;
	struct regions_data *rd, *rd_new;
	struct extab_info *ti;

	spin_lock(&ctx.lock);

	rd = rcu_dereference(ctx.rd);

	if (!rd || rd->curr_nr == 0)
		goto error_out;

	size = rd->size;
	nr_entries = rd->curr_nr;

	rd_new = rd_alloc(size);
	if (IS_ERR_OR_NULL(rd_new)) {
		pr_err_once("%s: error: rd_alloc\n", __func__);
		goto error_out;
	}

	memcpy(rd_new->entries, rd->entries,
	       nr_entries * sizeof(*rd->entries));

	rd_new->curr_nr = nr_entries;

	ri = __search_ex_region(rd_new->entries, nr_entries, vm_start);
	if (!ri)
		goto error_free;

	ti = &ri->ex_sec[secid];

	ti->tf_start = tf_start;
	ti->tf_end = tf_end;

	rcu_assign_pointer(ctx.rd, rd_new);

	call_rcu(&rd->rcu, rd_free_rcu);
	spin_unlock(&ctx.lock);

	return 0;

error_free:
	rd_free(rd_new);
error_out:
	spin_unlock(&ctx.lock);
	return -ENOENT;
}
static unsigned long
clean_mmap(struct regions_data *rd, struct quadd_mmap_area *mmap, int rm_ext)
{
	unsigned long nr_removed = 0;
	struct ex_region_info *entry, *next;

	if (!rd || !mmap)
		return 0;

	list_for_each_entry_safe(entry, next, &mmap->ex_entries, list) {
		if (rm_ext)
			nr_removed += remove_ex_region(rd, entry);

		list_del(&entry->list);
		kfree(entry);
	}

	return nr_removed;
}
void quadd_unwind_delete_mmap(struct quadd_mmap_area *mmap)
{
	unsigned long nr_entries, nr_removed, new_size;
	struct regions_data *rd, *rd_new;

	spin_lock(&ctx.lock);

	rd = rcu_dereference(ctx.rd);
	if (!rd || !rd->curr_nr)
		goto out;

	nr_entries = rd->curr_nr;
	new_size = min_t(unsigned long, rd->size, nr_entries);

	rd_new = rd_alloc(new_size);
	if (IS_ERR_OR_NULL(rd_new)) {
		pr_err("%s: error: rd_alloc\n", __func__);
		goto out;
	}
	rd_new->size = new_size;
	rd_new->curr_nr = nr_entries;

	memcpy(rd_new->entries, rd->entries,
	       nr_entries * sizeof(*rd->entries));

	nr_removed = clean_mmap(rd_new, mmap, 1);
	rd_new->curr_nr -= nr_removed;

	rcu_assign_pointer(ctx.rd, rd_new);
	call_rcu(&rd->rcu, rd_free_rcu);

out:
	spin_unlock(&ctx.lock);
}
static const struct unwind_idx *
unwind_find_idx(struct ex_region_info *ri, u32 addr, unsigned long *lowaddr)
{
	u32 value;
	unsigned long length;
	struct extab_info *ti;
	struct unwind_idx *start;
	struct unwind_idx *stop;
	struct unwind_idx *mid = NULL;

	ti = &ri->ex_sec[QUADD_SEC_TYPE_EXIDX];

	length = ti->length / sizeof(*start);

	if (unlikely(!length))
		return NULL;

	start = (struct unwind_idx *)((char *)ri->mmap->data + ti->mmap_offset);
	stop = start + length - 1;

	value = (u32)mmap_prel31_to_addr(&start->addr_offset, ri,
					 QUADD_SEC_TYPE_EXIDX,
					 QUADD_SEC_TYPE_EXTAB, 0);
	if (!value || addr < value)
		return NULL;

	value = (u32)mmap_prel31_to_addr(&stop->addr_offset, ri,
					 QUADD_SEC_TYPE_EXIDX,
					 QUADD_SEC_TYPE_EXTAB, 0);
	if (!value || addr >= value)
		return NULL;

	while (start < stop - 1) {
		mid = start + ((stop - start) >> 1);

		value = (u32)mmap_prel31_to_addr(&mid->addr_offset, ri,
						 QUADD_SEC_TYPE_EXIDX,
						 QUADD_SEC_TYPE_EXTAB, 0);
		if (addr < value)
			stop = mid;
		else
			start = mid;
	}

	if (lowaddr)
		*lowaddr = mmap_prel31_to_addr(&start->addr_offset,
					       ri, QUADD_SEC_TYPE_EXIDX,
					       QUADD_SEC_TYPE_EXTAB, 0);

	return start;
}
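/*
 * Unwind instructions are packed into the 32-bit table words starting
 * at the most significant byte. unwind_get_byte() therefore shifts by
 * ctrl->byte * 8, and advances to the next word (decrementing entries
 * and resetting byte to 3) once byte reaches 0.
 */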
static unsigned long
unwind_get_byte(struct quadd_mmap_area *mmap,
		struct unwind_ctrl_block *ctrl, long *err)
{
	unsigned long ret;
	u32 insn_word;

	*err = 0;

	if (ctrl->entries <= 0) {
		pr_err_once("%s: error: corrupt unwind table\n", __func__);
		*err = -QUADD_URC_TBL_IS_CORRUPT;
		return 0;
	}

	*err = read_mmap_data(mmap, ctrl->insn, &insn_word);
	if (*err < 0)
		return 0;

	ret = (insn_word >> (ctrl->byte * 8)) & 0xff;

	if (ctrl->byte == 0) {
		ctrl->insn++;
		ctrl->entries--;
		ctrl->byte = 3;
	} else {
		ctrl->byte--;
	}

	return ret;
}
static long
read_uleb128(struct quadd_mmap_area *mmap,
	     struct unwind_ctrl_block *ctrl,
	     unsigned long *ret)
{
	long err = 0;
	unsigned long result = 0;
	unsigned char byte;
	int shift = 0, count = 0;

	while (1) {
		byte = unwind_get_byte(mmap, ctrl, &err);
		if (err < 0)
			return err;

		count++;

		result |= (byte & 0x7f) << shift;
		shift += 7;

		if (!(byte & 0x80))
			break;
	}

	*ret = result;

	return count;
}
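/*
 * The dispatch below follows the ARM EHABI personality-routine opcode
 * space: 0x00-0x3f/0x40-0x7f add to or subtract from vsp, 0x80xx pops
 * r4-r15 by mask (0x8000 means "refuse to unwind"), 0x9n sets vsp
 * from a register, 0xa0-0xaf pop r4-r[4+n] (plus r14 for 0xa8-0xaf),
 * 0xb0 finishes, 0xb1 pops r0-r3 by mask, 0xb2 is a ULEB128-sized
 * stack pop, and 0xb3/0xb8-0xbf/0xc8/0xc9/0xd0-0xd7 pop VFP double
 * registers. The qd fields mirror each action so the result can be
 * cross-checked against the disassembler.
 */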
/*
 * Execute the current unwind instruction.
 */
static long
unwind_exec_insn(struct quadd_mmap_area *mmap,
		 struct unwind_ctrl_block *ctrl,
		 struct quadd_disasm_data *qd)
{
	long err = 0;
	unsigned long i;
	unsigned long insn = unwind_get_byte(mmap, ctrl, &err);

	if (err < 0)
		return err;

	pr_debug("%s: insn = %08lx\n", __func__, insn);

	if ((insn & 0xc0) == 0x00) {
		ctrl->vrs[SP] += ((insn & 0x3f) << 2) + 4;
		qd->stacksize -= ((insn & 0x3f) << 2) + 4;

		pr_debug("CMD_DATA_POP: vsp = vsp + %lu (new: %#x)\n",
			 ((insn & 0x3f) << 2) + 4, ctrl->vrs[SP]);
	} else if ((insn & 0xc0) == 0x40) {
		ctrl->vrs[SP] -= ((insn & 0x3f) << 2) + 4;
		qd->stackoff -= ((insn & 0x3f) << 2) + 4;
		pr_debug("CMD_DATA_PUSH: vsp = vsp - %lu (new: %#x)\n",
			 ((insn & 0x3f) << 2) + 4, ctrl->vrs[SP]);
	} else if ((insn & 0xf0) == 0x80) {
		unsigned long mask;
		u32 __user *vsp = (u32 __user *)(unsigned long)ctrl->vrs[SP];
		int load_sp, reg = 4;

		insn = (insn << 8) | unwind_get_byte(mmap, ctrl, &err);
		if (err < 0)
			return err;

		mask = insn & 0x0fff;
		if (mask == 0) {
			pr_debug("CMD_REFUSED: unwind: 'Refuse to unwind' instruction %04lx\n",
				 insn);
			return -QUADD_URC_REFUSE_TO_UNWIND;
		}

		/* pop R4-R15 according to mask */
		load_sp = mask & (1 << (13 - 4));
		while (mask) {
			if (mask & 1) {
				err = read_user_data(vsp++, ctrl->vrs[reg]);
				if (err < 0)
					return err;

				qd->r_regset &= ~(1 << reg);
				pr_debug("CMD_REG_POP: pop {r%d}\n", reg);
			}
			mask >>= 1;
			reg++;
		}
		if (!load_sp)
			ctrl->vrs[SP] = (unsigned long)vsp;

		pr_debug("new vsp: %#x\n", ctrl->vrs[SP]);
	} else if ((insn & 0xf0) == 0x90 &&
		   (insn & 0x0d) != 0x0d) {
		ctrl->vrs[SP] = ctrl->vrs[insn & 0x0f];
		qd->ustackreg = (insn & 0xf);
		pr_debug("CMD_REG_TO_SP: vsp = {r%lu}\n", insn & 0x0f);
	} else if ((insn & 0xf0) == 0xa0) {
		u32 __user *vsp = (u32 __user *)(unsigned long)ctrl->vrs[SP];
		unsigned int reg;

		/* pop R4-R[4+bbb] */
		for (reg = 4; reg <= 4 + (insn & 7); reg++) {
			err = read_user_data(vsp++, ctrl->vrs[reg]);
			if (err < 0)
				return err;

			qd->r_regset &= ~(1 << reg);
			pr_debug("CMD_REG_POP: pop {r%u}\n", reg);
		}

		if (insn & 0x08) {
			err = read_user_data(vsp++, ctrl->vrs[14]);
			if (err < 0)
				return err;

			qd->r_regset &= ~(1 << 14);
			pr_debug("CMD_REG_POP: pop {r14}\n");
		}

		ctrl->vrs[SP] = (u32)(unsigned long)vsp;
		pr_debug("new vsp: %#x\n", ctrl->vrs[SP]);
	} else if (insn == 0xb0) {
		if (ctrl->vrs[PC] == 0)
			ctrl->vrs[PC] = ctrl->vrs[LR];
		/* no further processing */
		ctrl->entries = 0;

		pr_debug("CMD_FINISH\n");
	} else if (insn == 0xb1) {
		unsigned long mask = unwind_get_byte(mmap, ctrl, &err);
		u32 __user *vsp = (u32 __user *)(unsigned long)ctrl->vrs[SP];
		int reg = 0;

		if (err < 0)
			return err;

		if (mask == 0 || mask & 0xf0) {
			pr_debug("unwind: Spare encoding %04lx\n",
				 (insn << 8) | mask);
			return -QUADD_URC_SPARE_ENCODING;
		}

		/* pop R0-R3 according to mask */
		while (mask) {
			if (mask & 1) {
				err = read_user_data(vsp++, ctrl->vrs[reg]);
				if (err < 0)
					return err;

				qd->r_regset &= ~(1 << reg);
				pr_debug("CMD_REG_POP: pop {r%d}\n", reg);
			}
			mask >>= 1;
			reg++;
		}

		ctrl->vrs[SP] = (u32)(unsigned long)vsp;
		pr_debug("new vsp: %#x\n", ctrl->vrs[SP]);
	} else if (insn == 0xb2) {
		long count;
		unsigned long uleb128 = 0;

		count = read_uleb128(mmap, ctrl, &uleb128);
		if (count < 0)
			return count;

		if (count == 0)
			return -QUADD_URC_TBL_IS_CORRUPT;

		ctrl->vrs[SP] += 0x204 + (uleb128 << 2);

		qd->stacksize -= 0x204 + (uleb128 << 2);
		pr_debug("CMD_DATA_POP: vsp = vsp + %lu (%#lx), new vsp: %#x\n",
			 0x204 + (uleb128 << 2), 0x204 + (uleb128 << 2),
			 ctrl->vrs[SP]);
	} else if (insn == 0xb3 || insn == 0xc8 || insn == 0xc9) {
		unsigned long data, reg_from, reg_to;
		u32 __user *vsp = (u32 __user *)(unsigned long)ctrl->vrs[SP];

		data = unwind_get_byte(mmap, ctrl, &err);
		if (err < 0)
			return err;

		reg_from = (data & 0xf0) >> 4;
		reg_to = reg_from + (data & 0x0f);

		if (insn == 0xc8) {
			/* D16-D31 */
			reg_from += 16;
			reg_to += 16;
		}

		for (i = reg_from; i <= reg_to; i++)
			vsp += 2, qd->d_regset &= ~(1 << i);

		if (insn == 0xb3)
			vsp++;

		ctrl->vrs[SP] = (u32)(unsigned long)vsp;

		pr_debug("CMD_VFP_POP (%#lx %#lx): pop {D%lu-D%lu}\n",
			 insn, data, reg_from, reg_to);
		pr_debug("new vsp: %#x\n", ctrl->vrs[SP]);
	} else if ((insn & 0xf8) == 0xb8 || (insn & 0xf8) == 0xd0) {
		unsigned long reg_to;
		unsigned long data = insn & 0x07;
		u32 __user *vsp = (u32 __user *)(unsigned long)ctrl->vrs[SP];

		reg_to = 8 + data;

		for (i = 8; i <= reg_to; i++)
			vsp += 2, qd->d_regset &= ~(1 << i);

		if ((insn & 0xf8) == 0xb8)
			vsp++;

		ctrl->vrs[SP] = (u32)(unsigned long)vsp;

		pr_debug("CMD_VFP_POP (%#lx): pop {D8-D%lu}\n",
			 insn, reg_to);
		pr_debug("new vsp: %#x\n", ctrl->vrs[SP]);
	} else {
		pr_debug("error: unhandled instruction %02lx\n", insn);
		return -QUADD_URC_UNHANDLED_INSTRUCTION;
	}

	pr_debug("%s: fp_arm: %#x, fp_thumb: %#x, sp: %#x, lr = %#x, pc: %#x\n",
		 __func__,
		 ctrl->vrs[FP_ARM], ctrl->vrs[FP_THUMB], ctrl->vrs[SP],
		 ctrl->vrs[LR], ctrl->vrs[PC]);

	return 0;
}
/*
 * Unwind a single frame starting with *sp for the symbol at *pc. It
 * updates the *pc and *sp with the new values.
 */
static long
unwind_frame(struct quadd_unw_methods um,
	     struct ex_region_info *ri,
	     struct stackframe *frame,
	     struct vm_area_struct *vma_sp,
	     int thumbflag)
{
	unsigned long high, low, min, max;
	const struct unwind_idx *idx;
	struct unwind_ctrl_block ctrl;
	struct quadd_disasm_data qd;
#ifdef QM_DEBUG_DISASSEMBLER
	struct quadd_disasm_data orig;
#endif
	long err;
	u32 val;

	if (!validate_stack_addr(frame->sp, vma_sp, sizeof(u32)))
		return -QUADD_URC_SP_INCORRECT;

	/* only go to a higher address on the stack */
	low = frame->sp;
	high = vma_sp->vm_end;

	pr_debug("pc: %#lx, lr: %#lx, sp:%#lx, low/high: %#lx/%#lx, thumb: %d\n",
		 frame->pc, frame->lr, frame->sp, low, high, thumbflag);

	idx = unwind_find_idx(ri, frame->pc, &min);
	if (IS_ERR_OR_NULL(idx))
		return -QUADD_URC_IDX_NOT_FOUND;

	pr_debug("index was found by pc (%#lx): %p\n", frame->pc, idx);

	ctrl.vrs[FP_THUMB] = frame->fp_thumb;
	ctrl.vrs[FP_ARM] = frame->fp_arm;

	ctrl.vrs[SP] = frame->sp;
	ctrl.vrs[LR] = frame->lr;
	ctrl.vrs[PC] = 0;

	err = read_mmap_data(ri->mmap, &idx->insn, &val);
	if (err < 0)
		return err;

	if (val == 1) {
		/* EXIDX_CANTUNWIND */
		return -QUADD_URC_CANTUNWIND;
	} else if ((val & 0x80000000) == 0) {
		/* prel31 to the unwind table */
		ctrl.insn = (u32 *)(unsigned long)
			mmap_prel31_to_addr(&idx->insn, ri,
					    QUADD_SEC_TYPE_EXIDX,
					    QUADD_SEC_TYPE_EXTAB, 1);
		if (!ctrl.insn)
			return -QUADD_URC_TBL_LINK_INCORRECT;
	} else if ((val & 0xff000000) == 0x80000000) {
		/* only personality routine 0 supported in the index */
		ctrl.insn = &idx->insn;
	} else {
		pr_debug("unsupported personality routine %#x in the index at %p\n",
			 val, idx);
		return -QUADD_URC_UNSUPPORTED_PR;
	}

	err = read_mmap_data(ri->mmap, ctrl.insn, &val);
	if (err < 0)
		return err;

	/* check the personality routine */
	if ((val & 0xff000000) == 0x80000000) {
		ctrl.byte = 2;
		ctrl.entries = 1;
	} else if ((val & 0xff000000) == 0x81000000) {
		ctrl.byte = 1;
		ctrl.entries = 1 + ((val & 0x00ff0000) >> 16);
	} else {
		pr_debug("unsupported personality routine %#x at %p\n",
			 val, ctrl.insn);
		return -QUADD_URC_UNSUPPORTED_PR;
	}

	if (um.ut_ce) {
		/* guess for the boundaries to disassemble */
		if (frame->pc - min < QUADD_DISASM_MIN)
			max = min + QUADD_DISASM_MIN;
		else
			max = (frame->pc - min < QUADD_DISASM_MAX)
				? frame->pc : min + QUADD_DISASM_MAX;
		err = quadd_disassemble(&qd, min, max, thumbflag);
		if (err < 0)
			return err;
#ifdef QM_DEBUG_DISASSEMBLER
		/* saved for verbose unwind mismatch reporting */
		orig = qd;
#endif
	}

	while (ctrl.entries > 0) {
		err = unwind_exec_insn(ri->mmap, &ctrl, &qd);
		if (err < 0)
			return err;

		if (ctrl.vrs[SP] & 0x03 ||
		    ctrl.vrs[SP] < low || ctrl.vrs[SP] >= high)
			return -QUADD_URC_SP_INCORRECT;
	}

	if (um.ut_ce && quadd_check_unwind_result(frame->pc, &qd) < 0)
		return -QUADD_URC_UNWIND_MISMATCH;

	if (ctrl.vrs[PC] == 0)
		ctrl.vrs[PC] = ctrl.vrs[LR];

	if (!validate_pc_addr(ctrl.vrs[PC], sizeof(u32)))
		return -QUADD_URC_PC_INCORRECT;

	frame->fp_thumb = ctrl.vrs[FP_THUMB];
	frame->fp_arm = ctrl.vrs[FP_ARM];

	frame->sp = ctrl.vrs[SP];
	frame->lr = ctrl.vrs[LR];
	frame->pc = ctrl.vrs[PC];

	return 0;
}
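/*
 * unwind_backtrace() repeatedly applies unwind_frame(), re-resolving
 * the exception-table region whenever the PC moves into a different
 * VMA. The walk ends when SP or PC fails validation, when no tables
 * cover the new PC, or when quadd_callchain_store() reports that the
 * callchain is full.
 */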
static void
unwind_backtrace(struct quadd_callchain *cc,
		 struct ex_region_info *ri,
		 struct stackframe *frame,
		 struct vm_area_struct *vma_sp,
		 struct task_struct *task,
		 int thumbflag)
{
	long err;
	int nr_added;
	struct ex_region_info ri_new;

	cc->urc_ut = QUADD_URC_FAILURE;

	pr_debug("fp_arm: %#lx, fp_thumb: %#lx, sp: %#lx, lr: %#lx, pc: %#lx\n",
		 frame->fp_arm, frame->fp_thumb,
		 frame->sp, frame->lr, frame->pc);
	pr_debug("vma_sp: %#lx - %#lx, length: %#lx\n",
		 vma_sp->vm_start, vma_sp->vm_end,
		 vma_sp->vm_end - vma_sp->vm_start);

	while (1) {
		struct extab_info *ti;
		unsigned long where = frame->pc;
		struct vm_area_struct *vma_pc;
		struct mm_struct *mm = task->mm;

		if (!mm)
			break;

		if (!validate_stack_addr(frame->sp, vma_sp, sizeof(u32))) {
			cc->urc_ut = QUADD_URC_SP_INCORRECT;
			break;
		}

		vma_pc = find_vma(mm, frame->pc);
		if (!vma_pc)
			break;

		ti = &ri->ex_sec[QUADD_SEC_TYPE_EXIDX];

		if (!is_vma_addr(ti->addr, vma_pc, sizeof(u32))) {
			err = get_extabs_ehabi(vma_pc->vm_start, &ri_new);
			if (err) {
				cc->urc_ut = QUADD_URC_TBL_NOT_EXIST;
				break;
			}

			ri = &ri_new;
		}

		err = unwind_frame(cc->um, ri, frame, vma_sp, thumbflag);
		if (err < 0) {
			pr_debug("end unwind, urc: %ld\n", err);
			cc->urc_ut = -err;
			break;
		}

		/* determine whether outer frame is ARM or Thumb */
		thumbflag = (frame->lr & 0x1);

		pr_debug("function at [<%08lx>] from [<%08lx>]\n",
			 where, frame->pc);

		cc->curr_sp = frame->sp;
		cc->curr_fp = frame->fp_arm;
		cc->curr_fp_thumb = frame->fp_thumb;
		cc->curr_pc = frame->pc;
		cc->curr_lr = frame->lr;

		nr_added = quadd_callchain_store(cc, frame->pc,
						 QUADD_UNW_TYPE_UT);
		if (nr_added == 0)
			break;
	}
}
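/*
 * Entry point from the sampling code. The first call of a sample
 * seeds the stackframe from pt_regs (with the r7/r11 frame pointers
 * taken from the compat registers on arm64); subsequent calls resume
 * from the cursor saved in the callchain so that mixed unwind methods
 * can pick up where the previous one stopped.
 */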
unsigned int
quadd_get_user_cc_arm32_ehabi(struct pt_regs *regs,
			      struct quadd_callchain *cc,
			      struct task_struct *task)
{
	long err;
	int nr_prev = cc->nr, thumbflag;
	unsigned long ip, sp, lr;
	struct vm_area_struct *vma, *vma_sp;
	struct mm_struct *mm = task->mm;
	struct ex_region_info ri;
	struct stackframe frame;

	if (!regs || !mm)
		return 0;

#ifdef CONFIG_ARM64
	if (!compat_user_mode(regs))
		return 0;
#endif

	if (cc->urc_ut == QUADD_URC_LEVEL_TOO_DEEP)
		return nr_prev;

	cc->urc_ut = QUADD_URC_FAILURE;

	if (nr_prev > 0) {
		ip = cc->curr_pc;
		sp = cc->curr_sp;
		lr = cc->curr_lr;
		thumbflag = (lr & 1);

		frame.fp_thumb = cc->curr_fp_thumb;
		frame.fp_arm = cc->curr_fp;
	} else {
		ip = instruction_pointer(regs);
		sp = quadd_user_stack_pointer(regs);
		lr = quadd_user_link_register(regs);
		thumbflag = is_thumb_mode(regs);

#ifdef CONFIG_ARM64
		frame.fp_thumb = regs->compat_usr(7);
		frame.fp_arm = regs->compat_usr(11);
#else
		frame.fp_thumb = regs->ARM_r7;
		frame.fp_arm = regs->ARM_fp;
#endif
	}

	frame.pc = ip;
	frame.sp = sp;
	frame.lr = lr;

	pr_debug("pc: %#lx, lr: %#lx\n", ip, lr);
	pr_debug("sp: %#lx, fp_arm: %#lx, fp_thumb: %#lx\n",
		 sp, frame.fp_arm, frame.fp_thumb);

	vma = find_vma(mm, ip);
	if (!vma)
		return 0;

	vma_sp = find_vma(mm, sp);
	if (!vma_sp)
		return 0;

	err = get_extabs_ehabi(vma->vm_start, &ri);
	if (err) {
		cc->urc_ut = QUADD_URC_TBL_NOT_EXIST;
		return 0;
	}

	unwind_backtrace(cc, &ri, &frame, vma_sp, task, thumbflag);

	pr_debug("%s: exit, cc->nr: %d --> %d\n",
		 __func__, nr_prev, cc->nr);

	return cc->nr;
}
int
quadd_is_ex_entry_exist_arm32_ehabi(struct pt_regs *regs,
				    unsigned long addr,
				    struct task_struct *task)
{
	long err;
	u32 value;
	const struct unwind_idx *idx;
	struct ex_region_info ri;
	struct vm_area_struct *vma;
	struct mm_struct *mm = task->mm;

	if (!regs || !mm)
		return 0;

	vma = find_vma(mm, addr);
	if (!vma)
		return 0;

	err = get_extabs_ehabi(vma->vm_start, &ri);
	if (err)
		return 0;

	idx = unwind_find_idx(&ri, addr, NULL);
	if (IS_ERR_OR_NULL(idx))
		return 0;

	err = read_mmap_data(ri.mmap, &idx->insn, &value);
	if (err < 0)
		return 0;

	/* EXIDX_CANTUNWIND */
	if (value == 1)
		return 0;

	return 1;
}
int quadd_unwind_start(struct task_struct *task)
{
	int err;
	struct regions_data *rd, *rd_old;

	rd = rd_alloc(QUADD_EXTABS_SIZE);
	if (IS_ERR_OR_NULL(rd)) {
		pr_err("%s: error: rd_alloc\n", __func__);
		return -ENOMEM;
	}

	err = quadd_dwarf_unwind_start();
	if (err < 0) {
		rd_free(rd);
		return err;
	}

	spin_lock(&ctx.lock);

	rd_old = rcu_dereference(ctx.rd);
	if (rd_old)
		pr_warn("%s: warning: rd_old\n", __func__);

	rcu_assign_pointer(ctx.rd, rd);

	if (rd_old)
		call_rcu(&rd_old->rcu, rd_free_rcu);

	ctx.pid = task->tgid;

	ctx.ex_tables_size = 0;

	spin_unlock(&ctx.lock);

	return 0;
}
void quadd_unwind_stop(void)
{
	unsigned long i, nr_entries;
	struct regions_data *rd;
	struct ex_region_info *ri;

	quadd_dwarf_unwind_stop();

	spin_lock(&ctx.lock);

	rd = rcu_dereference(ctx.rd);
	if (!rd)
		goto out;

	nr_entries = rd->curr_nr;

	for (i = 0; i < nr_entries; i++) {
		ri = &rd->entries[i];
		clean_mmap(rd, ri->mmap, 0);
	}

	rcu_assign_pointer(ctx.rd, NULL);
	call_rcu(&rd->rcu, rd_free_rcu);

out:
	spin_unlock(&ctx.lock);
	pr_info("exception tables size: %lu bytes\n", ctx.ex_tables_size);
}
int quadd_unwind_init(void)
{
	int err;

	err = quadd_dwarf_unwind_init();
	if (err < 0)
		return err;

	spin_lock_init(&ctx.lock);
	rcu_assign_pointer(ctx.rd, NULL);

	return 0;
}

void quadd_unwind_deinit(void)
{
	quadd_unwind_stop();
	/* wait for outstanding call_rcu() callbacks before unload */
	rcu_barrier();
}